From 3884a76abc026228e2a7639b8fedd7206c131a35 Mon Sep 17 00:00:00 2001 From: Joohwi Lee Date: Wed, 10 Dec 2025 23:08:19 -0800 Subject: [PATCH 1/3] feat: initialize kiana package with comprehensive tooling infrastructure - Add TypeScript configuration and build setup - Implement core CLI interface with commander.js - Create session management system for tracking agent interactions - Add comprehensive tool system with support for: - File operations (read, write, edit, list, glob, grep) - System operations (bash commands) - Web operations (web search, web fetch) - Code search and analysis (codesearch) - Task management and todo tracking - Implement event-driven architecture for session tracking - Add provider system for LLM integration - Include configuration management with JSONC support - Add environment configuration with .env.local - Set up proper gitignore for Node.js/TypeScript project - Include comprehensive type definitions for all components - Add utility modules for ripgrep integration This establishes the foundation for a sophisticated coding agent system with robust tooling, session tracking, and extensible architecture. 
--- bun.lock | 62 +- packages/kiana/.env.local | 33 + packages/kiana/.gitignore | 118 ++++ packages/kiana/kiana.jsonc | 8 + packages/kiana/package.json | 39 ++ packages/kiana/src/cli.ts | 416 +++++++++++ packages/kiana/src/config.ts | 102 +++ packages/kiana/src/event.ts | 111 +++ packages/kiana/src/index.ts | 63 ++ packages/kiana/src/provider.ts | 54 ++ packages/kiana/src/session.ts | 954 ++++++++++++++++++++++++++ packages/kiana/src/tool/bash.ts | 252 +++++++ packages/kiana/src/tool/codesearch.ts | 135 ++++ packages/kiana/src/tool/edit.ts | 648 +++++++++++++++++ packages/kiana/src/tool/glob.ts | 78 +++ packages/kiana/src/tool/grep.ts | 113 +++ packages/kiana/src/tool/index.ts | 82 +++ packages/kiana/src/tool/invalid.ts | 21 + packages/kiana/src/tool/list.ts | 115 ++++ packages/kiana/src/tool/read.ts | 259 +++++++ packages/kiana/src/tool/task.ts | 164 +++++ packages/kiana/src/tool/todo.ts | 109 +++ packages/kiana/src/tool/tool.ts | 65 ++ packages/kiana/src/tool/webfetch.ts | 242 +++++++ packages/kiana/src/tool/websearch.ts | 145 ++++ packages/kiana/src/tool/write.ts | 53 ++ packages/kiana/src/types/event.ts | 184 +++++ packages/kiana/src/types/index.ts | 5 + packages/kiana/src/types/message.ts | 96 +++ packages/kiana/src/types/part.ts | 229 +++++++ packages/kiana/src/types/session.ts | 57 ++ packages/kiana/src/util/ripgrep.ts | 220 ++++++ packages/kiana/test-config.jsonc | 9 + packages/kiana/tsconfig.json | 21 + 34 files changed, 5261 insertions(+), 1 deletion(-) create mode 100644 packages/kiana/.env.local create mode 100644 packages/kiana/.gitignore create mode 100644 packages/kiana/kiana.jsonc create mode 100644 packages/kiana/package.json create mode 100644 packages/kiana/src/cli.ts create mode 100644 packages/kiana/src/config.ts create mode 100644 packages/kiana/src/event.ts create mode 100644 packages/kiana/src/index.ts create mode 100644 packages/kiana/src/provider.ts create mode 100644 packages/kiana/src/session.ts create mode 100644 
packages/kiana/src/tool/bash.ts create mode 100644 packages/kiana/src/tool/codesearch.ts create mode 100644 packages/kiana/src/tool/edit.ts create mode 100644 packages/kiana/src/tool/glob.ts create mode 100644 packages/kiana/src/tool/grep.ts create mode 100644 packages/kiana/src/tool/index.ts create mode 100644 packages/kiana/src/tool/invalid.ts create mode 100644 packages/kiana/src/tool/list.ts create mode 100644 packages/kiana/src/tool/read.ts create mode 100644 packages/kiana/src/tool/task.ts create mode 100644 packages/kiana/src/tool/todo.ts create mode 100644 packages/kiana/src/tool/tool.ts create mode 100644 packages/kiana/src/tool/webfetch.ts create mode 100644 packages/kiana/src/tool/websearch.ts create mode 100644 packages/kiana/src/tool/write.ts create mode 100644 packages/kiana/src/types/event.ts create mode 100644 packages/kiana/src/types/index.ts create mode 100644 packages/kiana/src/types/message.ts create mode 100644 packages/kiana/src/types/part.ts create mode 100644 packages/kiana/src/types/session.ts create mode 100644 packages/kiana/src/util/ripgrep.ts create mode 100644 packages/kiana/test-config.jsonc create mode 100644 packages/kiana/tsconfig.json diff --git a/bun.lock b/bun.lock index cc6aa0a04ba..2b785d75325 100644 --- a/bun.lock +++ b/bun.lock @@ -208,6 +208,26 @@ "typescript": "catalog:", }, }, + "packages/kiana": { + "name": "kiana", + "version": "0.1.0", + "bin": { + "kiana": "./dist/cli.js", + }, + "dependencies": { + "@ai-sdk/anthropic": "beta", + "@ai-sdk/google": "beta", + "@ai-sdk/openai": "beta", + "@ai-sdk/openai-compatible": "beta", + "ai": "beta", + "jsonc-parser": "latest", + "zod": "latest", + }, + "devDependencies": { + "@types/node": "latest", + "typescript": "latest", + }, + }, "packages/memsh-cli": { "name": "@opencode-ai/memsh-cli", "version": "0.1.0", @@ -523,7 +543,7 @@ "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.12", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.17", "@vercel/oidc": 
"3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-W+cB1sOWvPcz9qiIsNtD+HxUrBUva2vWv2K1EFukuImX+HA0uZx3EyyOjhYQ9gtf/teqEG80M6OvJ7xx/VLV2A=="], - "@ai-sdk/google": ["@ai-sdk/google@2.0.44", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-c5dck36FjqiVoeeMJQLTEmUheoURcGTU/nBT6iJu8/nZiKFT/y8pD85KMDRB7RerRYaaQOtslR2d6/5PditiRw=="], + "@ai-sdk/google": ["@ai-sdk/google@3.0.0-beta.69", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-KQSZYlj3DuDspvH0WGfPueQDFg6i4YUCEK02ErUzO1Cki3S5hJgywRcwVEZl6ozdsmHWvM8XGQ4yeSUmYn0kxw=="], "@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.81", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.50", "@ai-sdk/google": "2.0.44", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18", "google-auth-library": "^9.15.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-yrl5Ug0Mqwo9ya45oxczgy2RWgpEA/XQQCSFYP+3NZMQ4yA3Iim1vkOjVCsGaZZ8rjVk395abi1ZMZV0/6rqVA=="], @@ -2749,6 +2769,8 @@ "jwt-decode": ["jwt-decode@3.1.2", "", {}, "sha512-UfpWE/VZn0iP50d8cz9NrZLM9lSWhcJ+0Gt/nm4by88UL+J1SiKN8/5dkjMmbEzwL2CAe+67GsegCbIKtbp75A=="], + "kiana": ["kiana@workspace:packages/kiana"], + "kind-of": ["kind-of@6.0.3", "", {}, "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw=="], "kleur": ["kleur@4.1.5", "", {}, "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ=="], @@ -3887,8 +3909,14 @@ "@ai-sdk/gateway/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, 
"sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="], + "@ai-sdk/google/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], + + "@ai-sdk/google/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], + "@ai-sdk/google-vertex/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.50", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-21PaHfoLmouOXXNINTsZJsMw+wE5oLR2He/1kq/sKokTVKyq7ObGT1LDk6ahwxaz/GoaNaGankMh+EgVcdv2Cw=="], + "@ai-sdk/google-vertex/@ai-sdk/google": ["@ai-sdk/google@2.0.44", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-c5dck36FjqiVoeeMJQLTEmUheoURcGTU/nBT6iJu8/nZiKFT/y8pD85KMDRB7RerRYaaQOtslR2d6/5PditiRw=="], + "@ai-sdk/mcp/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="], "@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", 
"eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="], @@ -4243,6 +4271,18 @@ "jsonwebtoken/jws": ["jws@3.2.2", "", { "dependencies": { "jwa": "^1.4.1", "safe-buffer": "^5.0.1" } }, "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA=="], + "kiana/@ai-sdk/anthropic": ["@ai-sdk/anthropic@3.0.0-beta.81", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ItrE4+G6CzNs41s5E+Velx/HZng9AmecZ1OkLZxF+WUYvvk31nrjBUZgsggLuxJX91RRFqxtgLz8YrR70g7oNw=="], + + "kiana/@ai-sdk/openai": ["@ai-sdk/openai@3.0.0-beta.90", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-5q0PKRShOPtkkRjaqUrMEUl0FXhFak/e4ldnZxoJ53BMhNNe0/MbTXuqztZBNLCiG1jSLAZRZfqY/Wj75cNIjQ=="], + + "kiana/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@2.0.0-beta.47", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-S4VbovR3WjWHF5tmNz6KQYnpP3+VSBrsLd+5uewlpv/7dr4BpQuunpMNr/D4G3GogbVRDfnmYPSJ8OKxBb7Veg=="], + + "kiana/ai": ["ai@6.0.0-beta.144", "", { "dependencies": { "@ai-sdk/gateway": "2.0.0-beta.75", "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-6+RP0ZCmfYp0omEafUZXj4c466dVR5IpO1TSr4zCwno6Lir+M/SQmD4Vioz3uayC6TWJZWcI04mflGEUcMfb6g=="], + + "kiana/typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, 
"sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], + + "kiana/zod": ["zod@4.1.13", "", {}, "sha512-AvvthqfqrAhNH9dnfmrfKzX5upOdjUVJYFqNSlkmGf64gRaTzlPwz99IHYnVs28qYAybvAlBV+H7pn0saFY4Ig=="], + "lazystream/readable-stream": ["readable-stream@2.3.8", "", { "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA=="], "md-to-react-email/marked": ["marked@7.0.4", "", { "bin": { "marked": "bin/marked.js" } }, "sha512-t8eP0dXRJMtMvBojtkcsA7n48BkauktUKzfkPSCq85ZMTJ0v76Rke4DYz01omYpPTUh4p/f7HePgRo3ebG8+QQ=="], @@ -4267,6 +4307,8 @@ "opencode/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.50", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-21PaHfoLmouOXXNINTsZJsMw+wE5oLR2He/1kq/sKokTVKyq7ObGT1LDk6ahwxaz/GoaNaGankMh+EgVcdv2Cw=="], + "opencode/@ai-sdk/google": ["@ai-sdk/google@2.0.44", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-c5dck36FjqiVoeeMJQLTEmUheoURcGTU/nBT6iJu8/nZiKFT/y8pD85KMDRB7RerRYaaQOtslR2d6/5PditiRw=="], + "opencode/@ai-sdk/openai": ["@ai-sdk/openai@2.0.71", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-tg+gj+R0z/On9P4V7hy7/7o04cQPjKGayMCL3gzWD/aNGjAKkhEnaocuNDidSnghizt8g2zJn16cAuAolnW+qQ=="], "opencode/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.27", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, 
"sha512-bpYruxVLhrTbVH6CCq48zMJNeHu6FmHtEedl9FXckEgcIEAi036idFhJlcRwC1jNCwlacbzb8dPD7OAH1EKJaQ=="], @@ -4849,6 +4891,24 @@ "jsonwebtoken/jws/jwa": ["jwa@1.4.2", "", { "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw=="], + "kiana/@ai-sdk/anthropic/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], + + "kiana/@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], + + "kiana/@ai-sdk/openai/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], + + "kiana/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], + + "kiana/@ai-sdk/openai-compatible/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", 
{ "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], + + "kiana/@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], + + "kiana/ai/@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.0-beta.75", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46", "@vercel/oidc": "3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-hxfPJLt80SHEuSmhlcbtgextRSH7aHAySWaN2CVuPASfqidigTEsOt+4t0/9Xqf7v3I5lHqlgMEbqfhzc7eGOQ=="], + + "kiana/ai/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], + + "kiana/ai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], + "lazystream/readable-stream/safe-buffer": ["safe-buffer@5.1.2", "", {}, "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="], "lazystream/readable-stream/string_decoder": 
["string_decoder@1.1.1", "", { "dependencies": { "safe-buffer": "~5.1.0" } }, "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg=="], diff --git a/packages/kiana/.env.local b/packages/kiana/.env.local new file mode 100644 index 00000000000..6544813b1c4 --- /dev/null +++ b/packages/kiana/.env.local @@ -0,0 +1,33 @@ +ARK_MODEL_ID=ep-20250821060450-4bc6g +ARK_API_KEY=23ed9d5c-c634-4cc1-9e70-9c9ac63a17ef +ARK_BASE_URL=https://ark-ap-southeast.byteintl.net/api/v3 +OPENWEATHERMAP_API_KEY=fe594593009f3d26a77f9bad447b9f9e + +# VFS Configuration +VFS_BASE_URL=https://cnog-vfs.byted.org + +# Lark (飞书) Configuration +LARK_APP_ID=cli_a34df2c15838d00d +LARK_APP_SECRET=I68GxbynU2FhdXw7HZUeeDChRb5zLDD1 + +# Public variables (for frontend) +NEXT_PUBLIC_LARK_APP_ID=cli_a34df2c15838d00d + +# ByteCloud SSO Configuration +# Domain ID for ByteCloud JWT service (default: boe-i18n) +# Options: boe-i18n, i18n, online, cn, ttp-row, ttp-us, etc. +BYTECLOUD_DOMAIN_ID=boe-i18n +NEXT_PUBLIC_BYTECLOUD_DOMAIN_ID=boe-i18n + +# Development User Configuration (used when running locally without SSO) +# Customize these to simulate different users during development +DEV_USER_ID=joohwi.lee +DEV_USER_NAME=Joohwi Lee +DEV_USER_EMAIL=joohwi.lee@bytedance.com +# DEV_USER_AVATAR=https://example.com/avatar.jpg + +# Public dev user vars (for client-side fallback) +NEXT_PUBLIC_DEV_USER_ID=joohwi.lee +NEXT_PUBLIC_DEV_USER_NAME=Joohwi Lee +NEXT_PUBLIC_DEV_USER_EMAIL=joohwi.lee@bytedance.com +# NEXT_PUBLIC_DEV_USER_AVATAR=https://example.com/avatar.jpg diff --git a/packages/kiana/.gitignore b/packages/kiana/.gitignore new file mode 100644 index 00000000000..55f5869f14a --- /dev/null +++ b/packages/kiana/.gitignore @@ -0,0 +1,118 @@ +# Dependencies +node_modules/ +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Coverage directory used by tools like istanbul +coverage/ +*.lcov + +# nyc test coverage 
+.nyc_output + +# Grunt intermediate storage +.grunt + +# Bower dependency directory +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons +build/Release + +# Dependency directories +jspm_packages/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variables file +.env +.env.test +.env.local + +# parcel-bundler cache +.cache +.parcel-cache + +# Next.js build output +.next + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +public + +# Storybook build outputs +.out +.storybook-out + +# Temporary folders +tmp/ +temp/ + +# Editor directories and files +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS generated files +.DS_Store +.DS_Store? 
+._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db + +# Build outputs +dist/ +build/ +lib/ + +# Test outputs +test-results/ + +# Logs +logs +*.log + +# Runtime traces +trace +*.trace \ No newline at end of file diff --git a/packages/kiana/kiana.jsonc b/packages/kiana/kiana.jsonc new file mode 100644 index 00000000000..72d963fb24a --- /dev/null +++ b/packages/kiana/kiana.jsonc @@ -0,0 +1,8 @@ +{ + "provider": { + "type": "openai-compatible", + "apiKey": "23ed9d5c-c634-4cc1-9e70-9c9ac63a17ef", + "model": "ep-20250821060450-4bc6g", + "baseUrl": "https://ark-ap-southeast.byteintl.net/api/v3" + } +} diff --git a/packages/kiana/package.json b/packages/kiana/package.json new file mode 100644 index 00000000000..d245e7e57e2 --- /dev/null +++ b/packages/kiana/package.json @@ -0,0 +1,39 @@ +{ + "name": "kiana", + "version": "0.1.0", + "description": "Kiana - Minimal headless coding agent", + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "bin": { + "kiana": "./dist/cli.js" + }, + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js" + } + }, + "scripts": { + "build": "tsc", + "dev": "tsc --watch", + "typecheck": "tsc --noEmit" + }, + "dependencies": { + "ai": "beta", + "@ai-sdk/anthropic": "beta", + "@ai-sdk/openai": "beta", + "@ai-sdk/openai-compatible": "beta", + "@ai-sdk/google": "beta", + "zod": "latest", + "jsonc-parser": "latest" + }, + "devDependencies": { + "typescript": "latest", + "@types/node": "latest" + }, + "engines": { + "node": ">=20" + }, + "license": "MIT" +} diff --git a/packages/kiana/src/cli.ts b/packages/kiana/src/cli.ts new file mode 100644 index 00000000000..9dfa72df4f8 --- /dev/null +++ b/packages/kiana/src/cli.ts @@ -0,0 +1,416 @@ +#!/usr/bin/env node + +import * as fs from "node:fs" +import * as path from "node:path" +import * as readline from "node:readline" +import { createSession } from "./session.js" +import { loadConfig, writeConfigTemplate } from "./config.js" +import { 
VERSION } from "./index.js" + +interface CliArgs { + config?: string + session?: string + cwd?: string + createConfig?: string + humanReadable?: boolean + verbose?: boolean + prompt?: string + help?: boolean + version?: boolean +} + +function parseArgs(args: string[]): CliArgs { + const result: CliArgs = {} + + for (let i = 0; i < args.length; i++) { + const arg = args[i] + + if (arg === "--config" || arg === "-c") { + result.config = args[++i] + } else if (arg === "--session" || arg === "-s") { + result.session = args[++i] + } else if (arg === "--cwd") { + result.cwd = args[++i] + } else if (arg === "--create-config") { + result.createConfig = args[++i] + } else if (arg === "--human-readable" || arg === "-H") { + result.humanReadable = true + } else if (arg === "--prompt" || arg === "-p") { + result.prompt = args[++i] + } else if (arg === "--verbose") { + result.verbose = true + } else if (arg === "--help" || arg === "-h") { + result.help = true + } else if (arg === "--version" || arg === "-V") { + result.version = true + } + } + + return result +} + +function printHelp(): void { + console.log(` +Kiana - Minimal Headless Coding Agent + +Usage: + kiana --config [options] + kiana --create-config + +Options: + --config, -c Path to JSONC config file (required) + --prompt, -p Initial prompt to send (exits after completion) + --session, -s Directory to persist session + --cwd Working directory for tools + --human-readable, -H Output in human-readable format (streaming text) + --verbose Show detailed tool inputs/outputs (with -H) + --create-config Write config template and exit + --help, -h Show this help message + --version, -V Show version + +Stdin/Stdout Protocol: + Input (stdin): Line-delimited JSON commands + {"type": "message", "text": "Fix the bug in auth.ts"} + {"type": "abort"} + + Output (stdout): Line-delimited JSON events + {"type": "session.created", "properties": {...}} + {"type": "message.part.updated", "properties": {...}} + +Example: + kiana --config 
./kiana.jsonc + echo '{"type":"message","text":"Hello"}' | kiana --config ./kiana.jsonc +`) +} + +function printVersion(): void { + console.log(`kiana v${VERSION}`) +} + +interface StdinCommand { + type: "message" | "abort" + text?: string +} + +// ANSI color codes +const colors = { + reset: "\x1b[0m", + bold: "\x1b[1m", + dim: "\x1b[2m", + cyan: "\x1b[36m", + green: "\x1b[32m", + yellow: "\x1b[33m", + blue: "\x1b[34m", + magenta: "\x1b[35m", + red: "\x1b[31m", + gray: "\x1b[90m", +} + +// Human-readable event formatter +function formatEventHumanReadable(event: any, verbose: boolean = false): string | null { + const indent = event.context ? " ".repeat(event.context.depth) : "" + const agentPrefix = event.context + ? `${colors.magenta}[${event.context.agentType || "subagent"}]${colors.reset} ` + : "" + + switch (event.type) { + case "session.created": + return `${colors.dim}Session started: ${event.properties.session.id}${colors.reset}\n` + + case "session.idle": + return `${colors.dim}--- Session idle ---${colors.reset}\n` + + case "message.created": + if (event.properties.message.role === "user") { + return null // User message text is shown via part.updated + } + return null // Don't show assistant message creation + + case "message.part.updated": { + const part = event.properties.part + const delta = event.properties.delta + + if (part.type === "text") { + if (delta) { + // Streaming text delta + return `${indent}${agentPrefix}${delta}` + } + return null // Final update without delta + } + + if (part.type === "reasoning") { + if (delta) { + return `${indent}${agentPrefix}${colors.dim}${delta}${colors.reset}` + } + return null + } + + if (part.type === "tool") { + const state = part.state + if (state.status === "running") { + const toolName = part.tool + const title = state.title || formatToolInput(toolName, state.input) + let output = `\n${indent}${agentPrefix}${colors.cyan}▶ ${toolName}${colors.reset} ${colors.gray}${title}${colors.reset}\n` + // In verbose 
mode, show the full input + if (verbose && state.input && Object.keys(state.input).length > 0) { + const inputStr = JSON.stringify(state.input, null, 2) + .split("\n") + .map((line) => `${indent} ${colors.dim}${line}${colors.reset}`) + .join("\n") + output += `${inputStr}\n` + } + return output + } + if (state.status === "completed") { + const title = state.title || part.tool + // Check if this is actually an error wrapped as a completed result + const isError = state.metadata?.error || state.metadata?.validationError || state.metadata?.parseError + const icon = isError ? "✗" : "✓" + const color = isError ? colors.red : colors.green + let output = `${indent}${agentPrefix}${color}${icon} ${title}${colors.reset}\n` + // In verbose mode, show the output (truncated) + if (verbose && state.output) { + const outputLines = String(state.output).split("\n").slice(0, 10) + if (String(state.output).split("\n").length > 10) { + outputLines.push("... (truncated)") + } + const outputStr = outputLines + .map((line) => `${indent} ${colors.dim}${line}${colors.reset}`) + .join("\n") + output += `${outputStr}\n` + } + return output + } + if (state.status === "error") { + let errorOutput = `${indent}${agentPrefix}${colors.red}✗ ${part.tool}: ${state.error}${colors.reset}\n` + // Always show the input that caused the error for debugging + if (state.input && Object.keys(state.input).length > 0) { + const inputStr = JSON.stringify(state.input, null, 2) + .split("\n") + .map((line) => `${indent} ${colors.dim}${line}${colors.reset}`) + .join("\n") + errorOutput += `${inputStr}\n` + } + return errorOutput + } + return null + } + + if (part.type === "step-start") { + return null // Don't show step start + } + + if (part.type === "step-finish") { + return `\n` // Add newline after step + } + + return null + } + + case "message.updated": { + const msg = event.properties.message + if (msg.error) { + return `${colors.red}Error: ${msg.error.message || msg.error}${colors.reset}\n` + } + return 
null + } + + case "todo.updated": { + const todos = event.properties.todos + if (!todos || todos.length === 0) return null + + let output = `\n${colors.yellow}📋 Todos:${colors.reset}\n` + for (const todo of todos) { + const icon = todo.status === "completed" ? "✓" : todo.status === "in_progress" ? "▶" : "○" + const color = todo.status === "completed" ? colors.green : todo.status === "in_progress" ? colors.cyan : colors.gray + output += ` ${color}${icon} ${todo.content}${colors.reset}\n` + } + return output + } + + default: + return null + } +} + +function formatToolInput(toolName: string, input: Record): string { + switch (toolName) { + case "read": + return String(input.filePath || input.file_path || "") + case "write": + return String(input.filePath || input.file_path || "") + case "edit": + return String(input.filePath || input.file_path || "") + case "bash": + return String(input.command || "").slice(0, 50) + (String(input.command || "").length > 50 ? "..." : "") + case "glob": + return String(input.pattern || "") + case "grep": + return String(input.pattern || "") + case "list": + return String(input.path || ".") + case "task": + return String(input.description || "") + default: + return "" + } +} + +async function main(): Promise { + const args = parseArgs(process.argv.slice(2)) + + if (args.help) { + printHelp() + process.exit(0) + } + + if (args.version) { + printVersion() + process.exit(0) + } + + if (args.createConfig) { + const configPath = path.resolve(args.createConfig) + writeConfigTemplate(configPath) + console.error(`Config template written to: ${configPath}`) + process.exit(0) + } + + if (!args.config) { + console.error("Error: --config is required") + console.error("Use --help for usage information") + process.exit(1) + } + + const configPath = path.resolve(args.config) + + if (!fs.existsSync(configPath)) { + console.error(`Error: Config file not found: ${configPath}`) + process.exit(1) + } + + // Load config + const config = 
loadConfig(configPath) + + // Override working directory if specified + if (args.cwd) { + config.workingDirectory = path.resolve(args.cwd) + } + + // Create session + const session = await createSession(config) + + // Subscribe to events and output + if (args.humanReadable) { + const verbose = args.verbose ?? false + session.onEvent((event) => { + const formatted = formatEventHumanReadable(event, verbose) + if (formatted) { + process.stdout.write(formatted) + } + }) + } else { + session.onEvent((event) => { + console.log(JSON.stringify(event)) + }) + } + + // Handle session persistence + if (args.session) { + const sessionDir = path.resolve(args.session) + fs.mkdirSync(sessionDir, { recursive: true }) + + // Save session info + const sessionFile = path.join(sessionDir, "session.json") + fs.writeFileSync( + sessionFile, + JSON.stringify( + { + id: session.id, + created: Date.now(), + config: configPath, + }, + null, + 2 + ) + ) + + // TODO: Implement full session persistence with messages/parts + } + + // If --prompt is provided, send it and exit after completion + if (args.prompt) { + await session.sendMessage(args.prompt) + process.exit(0) + } + + // Otherwise, read commands from stdin + let pendingMessage: Promise | null = null + let stdinClosed = false + + const rl = readline.createInterface({ + input: process.stdin, + output: undefined, + terminal: false, + }) + + rl.on("line", async (line) => { + if (!line.trim()) return + + try { + const command: StdinCommand = JSON.parse(line) + + switch (command.type) { + case "message": + if (command.text) { + pendingMessage = session.sendMessage(command.text) + await pendingMessage + pendingMessage = null + // If stdin already closed, exit after message completes + if (stdinClosed) { + process.exit(0) + } + } + break + + case "abort": + session.abort() + break + + default: + console.error(JSON.stringify({ error: `Unknown command type: ${(command as any).type}` })) + } + } catch (error) { + console.error( + 
JSON.stringify({ + error: `Failed to parse command: ${error instanceof Error ? error.message : String(error)}`, + }) + ) + } + }) + + rl.on("close", async () => { + stdinClosed = true + // Wait for any pending message to complete before exiting + if (pendingMessage) { + await pendingMessage + } + process.exit(0) + }) + + // Handle signals + process.on("SIGINT", () => { + session.abort() + process.exit(0) + }) + + process.on("SIGTERM", () => { + session.abort() + process.exit(0) + }) +} + +main().catch((error) => { + console.error(`Fatal error: ${error instanceof Error ? error.message : String(error)}`) + process.exit(1) +}) diff --git a/packages/kiana/src/config.ts b/packages/kiana/src/config.ts new file mode 100644 index 00000000000..5e0873e287a --- /dev/null +++ b/packages/kiana/src/config.ts @@ -0,0 +1,102 @@ +import { z } from "zod" +import { readFileSync, writeFileSync } from "node:fs" +import { parse as parseJsonc } from "jsonc-parser" + +export const ProviderConfigSchema = z.object({ + type: z.enum(["anthropic", "openai", "openai-compatible", "google"]), + apiKey: z.string(), + model: z.string(), + baseUrl: z.string().nullable().optional(), +}) + +export type ProviderConfig = z.infer + +export const ConfigSchema = z.object({ + provider: ProviderConfigSchema, + systemPrompt: z.string().optional(), + workingDirectory: z.string().nullable().optional(), + tools: z.array(z.string()).nullable().optional(), +}) + +export type Config = z.infer + +/** + * Load and validate config from a JSONC file. + */ +export function loadConfig(path: string): Config { + const content = readFileSync(path, "utf-8") + const parsed = parseJsonc(content) + + const result = ConfigSchema.safeParse(parsed) + if (!result.success) { + const errors = result.error.issues + .map((e: z.ZodIssue) => ` ${e.path.join(".")}: ${e.message}`) + .join("\n") + throw new Error(`Invalid config at ${path}:\n${errors}`) + } + + return result.data +} + +/** + * Default system prompt for Kiana headless mode. 
+ */ +export const DEFAULT_SYSTEM_PROMPT = `You are Kiana, a powerful coding agent running in headless mode. + +You help users with software engineering tasks using the tools available to you. You are running non-interactively, so you cannot ask clarifying questions - make reasonable assumptions and proceed. + +IMPORTANT: You must NEVER generate or guess URLs unless you are confident they are for programming help. + +# Tone and style +- No emojis unless explicitly requested. +- Responses should be short and concise. +- Use Github-flavored markdown for formatting. +- Output text to communicate; use tools only to complete tasks. +- NEVER create files unless absolutely necessary. Prefer editing existing files. + +# Professional objectivity +Prioritize technical accuracy over validation. Focus on facts and problem-solving with direct, objective technical info. + +# Non-interactive mode +Since you are running headless without user interaction: +- Do not ask for clarification - make reasonable assumptions +- Complete tasks autonomously +- Report progress and results via tool outputs + +# Tool usage +- Call multiple tools in parallel when independent. +- Use specialized tools (Read, Write, Edit, Glob, Grep) instead of bash equivalents. + +# Code References +When referencing code, include \`file_path:line_number\` for navigation.` + +/** + * Config template for --create-config flag. 
+ */ +export const CONFIG_TEMPLATE = `{ + // Provider configuration + "provider": { + "type": "anthropic", // "anthropic" | "openai" | "openai-compatible" | "google" + "apiKey": "YOUR_API_KEY", + "model": "claude-sonnet-4-20250514", + "baseUrl": null // Required for openai-compatible + }, + + // System prompt (optional, has sensible default) + "systemPrompt": null, + + // Working directory for tools (optional, defaults to current dir) + "workingDirectory": null, + + // Tool whitelist (optional, null = all tools enabled) + // Available: bash, read, write, edit, glob, grep, list, webfetch, websearch, codesearch, todowrite, todoread, task + "tools": null +} +` + +/** + * Write config template to a file. + */ +export function writeConfigTemplate(path: string): void { + writeFileSync(path, CONFIG_TEMPLATE, "utf-8") +} diff --git a/packages/kiana/src/event.ts b/packages/kiana/src/event.ts new file mode 100644 index 00000000000..5ed488c26a5 --- /dev/null +++ b/packages/kiana/src/event.ts @@ -0,0 +1,111 @@ +import type { Event, EventType, SessionEvent, MessageEvent, PartEvent, TodoEvent } from "./types/event.js" + +// Re-export types for convenience +export type { Event as EventTypes, EventType, SessionEvent, MessageEvent, PartEvent, TodoEvent } + +type EventCallback = (event: Event) => void +type TypedCallback = ( + event: Extract +) => void + +/** + * Simple typed event bus for Kiana events. + * Supports both typed subscriptions (by event type) and global subscriptions. + */ +export class EventBus { + private listeners: Map> = new Map() + + /** + * Subscribe to a specific event type. + * Returns an unsubscribe function. 
+ */ + subscribe(callback: EventCallback): () => void + subscribe(type: T, callback: TypedCallback): () => void + subscribe( + typeOrCallback: T | EventCallback, + callback?: TypedCallback + ): () => void { + if (typeof typeOrCallback === "function") { + // Global subscription + return this.subscribeAll(typeOrCallback) + } + + // Type-specific subscription + const type = typeOrCallback + const cb = callback! + const listeners = this.listeners.get(type) ?? new Set() + listeners.add(cb as EventCallback) + this.listeners.set(type, listeners) + + return () => { + listeners.delete(cb as EventCallback) + if (listeners.size === 0) { + this.listeners.delete(type) + } + } + } + + /** + * Subscribe to all events. + * Returns an unsubscribe function. + */ + subscribeAll(callback: EventCallback): () => void { + const listeners = this.listeners.get("*") ?? new Set() + listeners.add(callback) + this.listeners.set("*", listeners) + + return () => { + listeners.delete(callback) + if (listeners.size === 0) { + this.listeners.delete("*") + } + } + } + + /** + * Publish an event to all matching subscribers. + * Alias: emit + */ + publish(event: Event): void { + // Notify type-specific listeners + const typeListeners = this.listeners.get(event.type) + if (typeListeners) { + for (const callback of typeListeners) { + try { + callback(event) + } catch (err) { + console.error(`Error in event listener for ${event.type}:`, err) + } + } + } + + // Notify global listeners + const globalListeners = this.listeners.get("*") + if (globalListeners) { + for (const callback of globalListeners) { + try { + callback(event) + } catch (err) { + console.error(`Error in global event listener:`, err) + } + } + } + } + + /** + * Emit an event (alias for publish). + */ + emit(event: Event): void { + this.publish(event) + } + + /** + * Remove all listeners. 
+ */ + clear(): void { + this.listeners.clear() + } +} + +// Default singleton instance +export const eventBus = new EventBus() diff --git a/packages/kiana/src/index.ts b/packages/kiana/src/index.ts new file mode 100644 index 00000000000..c7f073b3135 --- /dev/null +++ b/packages/kiana/src/index.ts @@ -0,0 +1,63 @@ +// Kiana - Minimal Headless Coding Agent Library +// A standalone TypeScript package providing a headless coding agent library + CLI wrapper + +// Session API +export { createSession, type Session, type SessionState, type MessageWithParts } from "./session.js" + +// Config +export { loadConfig, writeConfigTemplate, type Config, type ProviderConfig } from "./config.js" + +// Provider +export { createLanguageModel } from "./provider.js" + +// Event system +export { EventBus, type EventTypes, type SessionEvent, type MessageEvent, type PartEvent, type TodoEvent } from "./event.js" + +// Types +export type { SessionInfo } from "./types/session.js" +export type { MessageInfo } from "./types/message.js" +export type { + Part, + TextPart, + ReasoningPart, + ToolPart, + ToolState, + FilePart, + StepStartPart, + StepFinishPart, + SnapshotPart, + PatchPart, + AgentPart, + RetryPart, + CompactionPart, + SubtaskPart, +} from "./types/part.js" + +// Tools +export { + allTools, + getTools, + getToolInfo, + defineTool, + type Tool, + type ToolContext, + type ToolResult, + type ToolInfo, +} from "./tool/index.js" + +// Individual tools (for advanced usage) +export { bashTool } from "./tool/bash.js" +export { readTool } from "./tool/read.js" +export { writeTool } from "./tool/write.js" +export { editTool } from "./tool/edit.js" +export { globTool } from "./tool/glob.js" +export { grepTool } from "./tool/grep.js" +export { listTool } from "./tool/list.js" +export { webfetchTool } from "./tool/webfetch.js" +export { websearchTool } from "./tool/websearch.js" +export { codesearchTool } from "./tool/codesearch.js" +export { todoWriteTool, todoReadTool, getTodos, setTodos } 
from "./tool/todo.js" +export { taskTool, setSubagentExecutor } from "./tool/task.js" + +// Version +export const VERSION = "0.1.0" diff --git a/packages/kiana/src/provider.ts b/packages/kiana/src/provider.ts new file mode 100644 index 00000000000..09645772689 --- /dev/null +++ b/packages/kiana/src/provider.ts @@ -0,0 +1,54 @@ +import { createAnthropic } from "@ai-sdk/anthropic" +import { createOpenAI } from "@ai-sdk/openai" +import { createOpenAICompatible } from "@ai-sdk/openai-compatible" +import { createGoogleGenerativeAI } from "@ai-sdk/google" +import type { LanguageModel } from "ai" +import type { ProviderConfig } from "./config.js" + +/** + * Create a language model from provider config. + */ +export function createLanguageModel(config: ProviderConfig): LanguageModel { + switch (config.type) { + case "anthropic": { + const anthropic = createAnthropic({ + apiKey: config.apiKey, + baseURL: config.baseUrl ?? undefined, + }) + return anthropic(config.model) + } + + case "openai": { + const openai = createOpenAI({ + apiKey: config.apiKey, + baseURL: config.baseUrl ?? undefined, + }) + return openai(config.model) + } + + case "openai-compatible": { + if (!config.baseUrl) { + throw new Error("baseUrl is required for openai-compatible provider") + } + const compatible = createOpenAICompatible({ + name: "openai-compatible", + apiKey: config.apiKey, + baseURL: config.baseUrl, + }) + return compatible(config.model) + } + + case "google": { + const google = createGoogleGenerativeAI({ + apiKey: config.apiKey, + baseURL: config.baseUrl ?? 
undefined, + }) + return google(config.model) + } + + default: { + const _exhaustive: never = config.type + throw new Error(`Unknown provider type: ${_exhaustive}`) + } + } +} diff --git a/packages/kiana/src/session.ts b/packages/kiana/src/session.ts new file mode 100644 index 00000000000..012bd271f1c --- /dev/null +++ b/packages/kiana/src/session.ts @@ -0,0 +1,954 @@ +import { streamText, type LanguageModel, tool, jsonSchema } from "ai" +import { z } from "zod" +import { spawn } from "node:child_process" +import { EventBus, type EventTypes } from "./event.js" +import { Config } from "./config.js" +import { createLanguageModel } from "./provider.js" +import { allTools, getTools, setSubagentExecutor } from "./tool/index.js" +import type { Tool, ToolContext, ToolResult } from "./tool/tool.js" +import type { SessionInfo } from "./types/session.js" +import type { MessageInfo } from "./types/message.js" +import type { Part, TextPart, ToolPart, ReasoningPart, StepStartPart, StepFinishPart } from "./types/part.js" + +// ID generation +let idCounter = 0 +function generateId(prefix: string): string { + const timestamp = Date.now().toString(36) + const counter = (idCounter++).toString(36).padStart(4, "0") + return `${prefix}_${timestamp}${counter}` +} + +// Session state +interface SessionState { + id: string + projectID: string + config: Config + model: LanguageModel + tools: Record> + messages: MessageWithParts[] + abortController: AbortController + eventBus: EventBus + workingDirectory: string +} + +interface MessageWithParts { + info: MessageInfo + parts: Part[] +} + +export interface Session { + id: string + sendMessage(text: string): Promise + onEvent(callback: (event: EventTypes) => void): () => void + getTools(): { name: string; description: string }[] + abort(): void +} + +export async function createSession(config: Config): Promise { + const sessionID = generateId("session") + const projectID = generateId("project") + + const model = 
createLanguageModel(config.provider) + const tools = getTools(config.tools) + const workingDirectory = config.workingDirectory || process.cwd() + + const eventBus = new EventBus() + + const state: SessionState = { + id: sessionID, + projectID, + config, + model, + tools, + messages: [], + abortController: new AbortController(), + eventBus, + workingDirectory, + } + + // Setup subagent executor for Task tool + setSubagentExecutor(async (params) => { + // Create a subagent session + const subConfig: Config = { + ...config, + systemPrompt: params.agentConfig.systemPrompt, + } + + const subSession = await createSession(subConfig) + + // Build subagent context for event forwarding + const subagentContext = { + parentSessionID: params.parentSessionID, + depth: params.depth, + agentType: params.agentType, + } + + // Forward events from subsession with context + subSession.onEvent((event) => { + // Add subagent context to the event + const eventWithContext = { + ...event, + context: event.context + ? 
{ + // If event already has context (nested subagent), increment depth + ...event.context, + depth: event.context.depth + params.depth, + } + : subagentContext, + } + eventBus.emit(eventWithContext as any) + }) + + // Send the prompt and wait for completion + await subSession.sendMessage(params.prompt) + + // Get the last assistant message as output + const lastMessage = state.messages.findLast((m) => m.info.role === "assistant") + const textPart = lastMessage?.parts.find((p): p is TextPart => p.type === "text") + + return { + output: textPart?.text || "No output from subagent", + sessionID: subSession.id, + } + }) + + // Emit session created event + const sessionInfo: SessionInfo = { + id: sessionID, + projectID, + directory: workingDirectory, + title: "New Session", + version: "1.0.0", + time: { + created: Date.now(), + updated: Date.now(), + }, + } + + eventBus.emit({ + type: "session.created", + properties: { session: sessionInfo }, + }) + + async function sendMessage(text: string): Promise { + // Create user message + const userMessageID = generateId("message") + const userMessage: MessageInfo = { + id: userMessageID, + sessionID, + role: "user", + time: { created: Date.now() }, + } + + const userTextPart: TextPart = { + id: generateId("part"), + sessionID, + messageID: userMessageID, + type: "text", + text, + } + + state.messages.push({ + info: userMessage, + parts: [userTextPart], + }) + + eventBus.emit({ + type: "message.created", + properties: { message: userMessage }, + }) + + eventBus.emit({ + type: "message.part.updated", + properties: { part: userTextPart }, + }) + + // Run agent loop + await runAgentLoop(state) + } + + function onEvent(callback: (event: EventTypes) => void): () => void { + return eventBus.subscribe(callback) + } + + function getToolsInfo(): { name: string; description: string }[] { + return Object.entries(state.tools).map(([name, tool]) => ({ + name, + description: tool.description, + })) + } + + function abort(): void { + 
state.abortController.abort() + state.abortController = new AbortController() + + eventBus.emit({ + type: "session.idle", + properties: { sessionID }, + }) + } + + return { + id: sessionID, + sendMessage, + onEvent, + getTools: getToolsInfo, + abort, + } +} + +async function runAgentLoop(state: SessionState): Promise { + const { eventBus, config, model, tools, workingDirectory } = state + + while (true) { + state.abortController.signal.throwIfAborted() + + // Get the last user message + const lastUserMessage = state.messages.findLast((m) => m.info.role === "user") + if (!lastUserMessage) break + + // Get the last assistant message + const lastAssistantMessage = state.messages.findLast((m) => m.info.role === "assistant") + + // Check if we should continue + if (lastAssistantMessage) { + const finishReason = (lastAssistantMessage.info as any).finish + if (finishReason && !["tool-calls", "unknown"].includes(finishReason)) { + if (lastUserMessage.info.id < lastAssistantMessage.info.id) { + break + } + } + } + + // Create assistant message + const assistantMessageID = generateId("message") + const assistantMessage: MessageInfo = { + id: assistantMessageID, + sessionID: state.id, + role: "assistant", + parentID: lastUserMessage.info.id, + modelID: config.provider.model, + providerID: config.provider.type, + time: { created: Date.now() }, + cost: 0, + tokens: { + input: 0, + output: 0, + reasoning: 0, + cache: { read: 0, write: 0 }, + }, + } + + const assistantParts: Part[] = [] + state.messages.push({ info: assistantMessage, parts: assistantParts }) + + eventBus.emit({ + type: "message.created", + properties: { message: assistantMessage }, + }) + + // Build system prompt with environment context + const baseSystemPrompt = config.systemPrompt || getDefaultSystemPrompt() + const envContext = await buildEnvironmentContext(workingDirectory) + const systemPrompt = baseSystemPrompt + "\n" + envContext + + // Build messages for AI + const aiMessages = 
buildAIMessages(state.messages) + + // Build tools for AI + const aiTools = buildAITools(state, assistantMessageID, assistantParts) + + try { + // Emit step start + const stepStartPart: StepStartPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "step-start", + } + assistantParts.push(stepStartPart) + eventBus.emit({ + type: "message.part.updated", + properties: { part: stepStartPart }, + }) + + // Stream the response + let currentTextPart: TextPart | undefined + let currentReasoningPart: ReasoningPart | undefined + const toolParts: Record = {} + + const stream = streamText({ + model, + system: systemPrompt, + messages: aiMessages, + tools: aiTools, + maxRetries: 0, + abortSignal: state.abortController.signal, + // Repair malformed tool calls - following OpenCode's approach + experimental_repairToolCall: async ({ toolCall, error }) => { + // First, try to repair common JSON errors from LLMs + const repairedJson = tryRepairJson(toolCall.input) + if (repairedJson !== null) { + // Successfully repaired - return the fixed tool call + return { + type: "tool-call" as const, + toolCallId: toolCall.toolCallId, + toolName: toolCall.toolName, + input: JSON.stringify(repairedJson), + } + } + + // Try to repair case-sensitivity issues (e.g., "Read" -> "read") + const lower = toolCall.toolName.toLowerCase() + if (lower !== toolCall.toolName && tools[lower]) { + return { + ...toolCall, + toolName: lower, + } + } + + // Could not repair - redirect to "invalid" tool so LLM can see the error + // and retry with corrected arguments + return { + ...toolCall, + input: JSON.stringify({ + tool: toolCall.toolName, + error: error.message, + }), + toolName: "invalid", + } + }, + }) + + for await (const chunk of stream.fullStream) { + state.abortController.signal.throwIfAborted() + + switch (chunk.type) { + case "text-start": + currentTextPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "text", 
+ text: "", + time: { start: Date.now() }, + } + assistantParts.push(currentTextPart) + break + + case "text-delta": + if (currentTextPart) { + currentTextPart.text += chunk.text + eventBus.emit({ + type: "message.part.updated", + properties: { part: currentTextPart, delta: chunk.text }, + }) + } + break + + case "text-end": + if (currentTextPart) { + currentTextPart.text = currentTextPart.text.trimEnd() + currentTextPart.time = { + ...currentTextPart.time!, + end: Date.now(), + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: currentTextPart }, + }) + currentTextPart = undefined + } + break + + case "reasoning-start": + currentReasoningPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "reasoning", + text: "", + time: { start: Date.now() }, + } + assistantParts.push(currentReasoningPart) + break + + case "reasoning-delta": + if (currentReasoningPart) { + currentReasoningPart.text += chunk.text + eventBus.emit({ + type: "message.part.updated", + properties: { part: currentReasoningPart, delta: chunk.text }, + }) + } + break + + case "reasoning-end": + if (currentReasoningPart) { + currentReasoningPart.text = currentReasoningPart.text.trimEnd() + currentReasoningPart.time = { + ...currentReasoningPart.time!, + end: Date.now(), + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: currentReasoningPart }, + }) + currentReasoningPart = undefined + } + break + + case "tool-input-start": { + const toolPart: ToolPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "tool", + tool: chunk.toolName, + callID: chunk.id, + state: { + status: "pending", + input: {}, + raw: "", + }, + } + assistantParts.push(toolPart) + toolParts[chunk.id] = toolPart + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) + break + } + + case "tool-call": { + const toolPart = toolParts[chunk.toolCallId] + if (toolPart) { + 
toolPart.state = { + status: "running", + input: (chunk as any).input as Record ?? {}, + time: { start: Date.now() }, + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) + } + break + } + + case "tool-result": { + const toolPart = toolParts[chunk.toolCallId] + if (toolPart && toolPart.state.status === "running") { + const output = chunk.output as ToolResult + toolPart.state = { + status: "completed", + input: (chunk as any).input as Record ?? {}, + output: output?.output ?? "", + title: output?.title ?? "", + metadata: output?.metadata ?? {}, + time: { + start: toolPart.state.time.start, + end: Date.now(), + }, + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) + } + break + } + + case "tool-error": { + const toolPart = toolParts[chunk.toolCallId] + if (toolPart && toolPart.state.status === "running") { + toolPart.state = { + status: "error", + input: (chunk as any).input as Record ?? {}, + error: String(chunk.error), + time: { + start: toolPart.state.time.start, + end: Date.now(), + }, + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) + } + break + } + + case "finish-step": { + // Update message with usage + const usage = chunk.usage as any + if (usage) { + ;(assistantMessage as any).tokens = { + input: usage.promptTokens || usage.inputTokens || 0, + output: usage.completionTokens || usage.outputTokens || 0, + reasoning: 0, + cache: { read: 0, write: 0 }, + } + } + ;(assistantMessage as any).finish = chunk.finishReason + + // Emit step finish + const stepFinishPart: StepFinishPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "step-finish", + reason: chunk.finishReason, + tokens: (assistantMessage as any).tokens, + cost: 0, + } + assistantParts.push(stepFinishPart) + eventBus.emit({ + type: "message.part.updated", + properties: { part: stepFinishPart }, + }) + break + } + + case "error": + 
throw chunk.error + } + } + + // Update message completion time + assistantMessage.time.completed = Date.now() + + eventBus.emit({ + type: "message.updated", + properties: { message: assistantMessage }, + }) + + // Check if we should continue (tool calls) + const finishReason = (assistantMessage as any).finish + if (finishReason !== "tool-calls") { + break + } + } catch (error) { + // Handle abort + if (error instanceof Error && error.name === "AbortError") { + break + } + + // Log error and break + console.error("Agent loop error:", error) + ;(assistantMessage as any).error = { + name: error instanceof Error ? error.name : "Error", + message: error instanceof Error ? error.message : String(error), + } + + eventBus.emit({ + type: "message.updated", + properties: { message: assistantMessage }, + }) + + break + } + } + + eventBus.emit({ + type: "session.idle", + properties: { sessionID: state.id }, + }) +} + +function buildAIMessages(messages: MessageWithParts[]): any[] { + const aiMessages: any[] = [] + + // Group consecutive assistant messages together (they represent a single turn with tool loop) + let i = 0 + while (i < messages.length) { + const msg = messages[i] + + if (msg.info.role === "user") { + const textParts = msg.parts.filter((p): p is TextPart => p.type === "text") + if (textParts.length > 0) { + aiMessages.push({ + role: "user", + content: textParts.map((p) => p.text).join("\n"), + }) + } + i++ + } else if (msg.info.role === "assistant") { + // Collect all parts from consecutive assistant messages + const allTextParts: TextPart[] = [] + const allToolParts: ToolPart[] = [] + + while (i < messages.length && messages[i].info.role === "assistant") { + const assistantMsg = messages[i] + const textParts = assistantMsg.parts.filter((p): p is TextPart => p.type === "text" && !!p.text) + // Include both completed and error tool calls - LLM needs to see errors to recover + const toolParts = assistantMsg.parts.filter( + (p): p is ToolPart => + p.type === "tool" 
&& (p.state.status === "completed" || p.state.status === "error") + ) + allTextParts.push(...textParts) + allToolParts.push(...toolParts) + i++ + } + + if (allToolParts.length > 0) { + // Build assistant message with text and tool calls + const content: any[] = [] + + for (const textPart of allTextParts) { + content.push({ type: "text", text: textPart.text }) + } + + for (const toolPart of allToolParts) { + content.push({ + type: "tool-call", + toolCallId: toolPart.callID, + toolName: toolPart.tool, + args: toolPart.state.input, + }) + } + + if (content.length > 0) { + aiMessages.push({ + role: "assistant", + content, + }) + } + + // Add tool results as separate messages (AI SDK 6 format) + for (const toolPart of allToolParts) { + // Handle both completed and error states + const outputValue = + toolPart.state.status === "error" + ? `Error: ${(toolPart.state as any).error}` + : (toolPart.state as any).output ?? "" + + aiMessages.push({ + role: "tool", + content: [ + { + type: "tool-result", + toolCallId: toolPart.callID, + toolName: toolPart.tool, + output: { + type: "text", + value: outputValue, + }, + }, + ], + }) + } + } else if (allTextParts.length > 0) { + // Just text, no tool calls + aiMessages.push({ + role: "assistant", + content: allTextParts.map((p) => p.text).join("\n"), + }) + } + } else { + i++ + } + } + + return aiMessages +} + +function buildAITools( + state: SessionState, + messageID: string, + parts: Part[] +): Record { + const { tools, workingDirectory, eventBus } = state + const aiTools: Record = {} + + for (const [name, toolDef] of Object.entries(tools)) { + // Use the actual tool schema - following OpenCode's approach + // The AI SDK will handle validation errors via tool-error events + const schema = z.toJSONSchema(toolDef.parameters) + + aiTools[name] = tool({ + description: toolDef.description, + inputSchema: jsonSchema(schema as any), + async execute(args, options) { + const ctx: ToolContext = { + sessionID: state.id, + messageID, + 
workingDirectory, + abort: options.abortSignal ?? new AbortController().signal, + metadata: (val) => { + // Find the tool part and update it + const toolPart = parts.find( + (p): p is ToolPart => p.type === "tool" && p.callID === options.toolCallId + ) + if (toolPart && toolPart.state.status === "running") { + toolPart.state = { + ...toolPart.state, + title: val.title, + metadata: val.metadata, + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) + } + }, + } + + // Execute the tool - let errors propagate naturally + // The AI SDK will catch them and emit tool-error events + // Validation is handled by the tool's defineTool wrapper + const result = await toolDef.execute(args, ctx) + return result + }, + toModelOutput(result: ToolResult) { + return { + type: "text", + value: result.output, + } + }, + }) + } + + return aiTools +} + +function getDefaultSystemPrompt(): string { + return `You are Kiana, a powerful coding agent running in headless mode. + +You help users with software engineering tasks using the tools available to you. You are running non-interactively, so you cannot ask clarifying questions - make reasonable assumptions and proceed. + +IMPORTANT: You must NEVER generate or guess URLs unless you are confident they are for programming help. + +# Tone and style +- No emojis unless explicitly requested. +- Responses should be short and concise. +- Use Github-flavored markdown for formatting. +- Output text to communicate; use tools only to complete tasks. +- NEVER create files unless absolutely necessary. Prefer editing existing files. + +# Professional objectivity +Prioritize technical accuracy over validation. Focus on facts and problem-solving with direct, objective technical info. 
+ +# Non-interactive mode +Since you are running headless without user interaction: +- Do not ask for clarification - make reasonable assumptions +- Complete tasks autonomously +- Report progress and results via tool outputs + +# Tool usage +- Call multiple tools in parallel when independent. +- Use specialized tools (Read, Write, Edit, Glob, Grep) instead of bash equivalents. + +# Code References +When referencing code, include \`file_path:line_number\` for navigation.` +} + +/** + * Build environment context for the system prompt. + * Includes working directory, git info, platform, date, and file tree. + */ +async function buildEnvironmentContext(workingDirectory: string): Promise { + const isGitRepo = await checkIsGitRepo(workingDirectory) + const gitBranch = isGitRepo ? await getGitBranch(workingDirectory) : null + const gitStatus = isGitRepo ? await getGitStatus(workingDirectory) : null + const fileTree = await getFileTree(workingDirectory, 200) + + const lines = [ + `Here is useful information about the environment you are running in:`, + ``, + `Working directory: ${workingDirectory}`, + `Is directory a git repo: ${isGitRepo ? "Yes" : "No"}`, + ] + + if (gitBranch) { + lines.push(`Current branch: ${gitBranch}`) + } + + lines.push(`Platform: ${process.platform}`) + lines.push(`Today's date: ${new Date().toISOString().split("T")[0]}`) + lines.push(``) + + if (gitStatus) { + lines.push(``) + lines.push(``) + lines.push(gitStatus) + lines.push(``) + } + + if (fileTree) { + lines.push(``) + lines.push(``) + lines.push(fileTree) + lines.push(``) + } + + return lines.join("\n") +} + +/** + * Check if a directory is a git repository. 
+ */ +async function checkIsGitRepo(cwd: string): Promise { + return new Promise((resolve) => { + const proc = spawn("git", ["rev-parse", "--is-inside-work-tree"], { + cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + proc.on("close", (code) => resolve(code === 0)) + proc.on("error", () => resolve(false)) + }) +} + +/** + * Get the current git branch name. + */ +async function getGitBranch(cwd: string): Promise { + return new Promise((resolve) => { + const proc = spawn("git", ["branch", "--show-current"], { + cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + let output = "" + proc.stdout.on("data", (data) => (output += data.toString())) + proc.on("close", (code) => { + if (code === 0 && output.trim()) { + resolve(output.trim()) + } else { + resolve(null) + } + }) + proc.on("error", () => resolve(null)) + }) +} + +/** + * Get git status (staged/unstaged changes, untracked files). + */ +async function getGitStatus(cwd: string): Promise { + return new Promise((resolve) => { + const proc = spawn("git", ["status", "--short"], { + cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + let output = "" + proc.stdout.on("data", (data) => (output += data.toString())) + proc.on("close", (code) => { + if (code === 0 && output.trim()) { + // Limit to first 50 lines + const lines = output.trim().split("\n").slice(0, 50) + if (output.trim().split("\n").length > 50) { + lines.push("... (truncated)") + } + resolve(lines.join("\n")) + } else { + resolve(null) + } + }) + proc.on("error", () => resolve(null)) + }) +} + +/** + * Get file tree using ripgrep (rg --files) or fallback to find. 
+ */ +async function getFileTree(cwd: string, limit: number): Promise { + // Try ripgrep first (faster, respects .gitignore) + const rgResult = await runCommand("rg", ["--files", "--sort", "path"], cwd, limit) + if (rgResult) return rgResult + + // Fallback to git ls-files if in a git repo + const gitResult = await runCommand("git", ["ls-files"], cwd, limit) + if (gitResult) return gitResult + + // Last resort: find command + const findResult = await runCommand( + "find", + [".", "-type", "f", "-not", "-path", "*/.*", "-not", "-path", "*/node_modules/*"], + cwd, + limit + ) + return findResult +} + +/** + * Run a command and return stdout, limited to a number of lines. + */ +async function runCommand( + cmd: string, + args: string[], + cwd: string, + lineLimit: number +): Promise { + return new Promise((resolve) => { + const proc = spawn(cmd, args, { + cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + let output = "" + proc.stdout.on("data", (data) => (output += data.toString())) + proc.on("close", (code) => { + if (code === 0 && output.trim()) { + const lines = output.trim().split("\n").slice(0, lineLimit) + if (output.trim().split("\n").length > lineLimit) { + lines.push(`... (${output.trim().split("\n").length - lineLimit} more files)`) + } + resolve(lines.join("\n")) + } else { + resolve(null) + } + }) + proc.on("error", () => resolve(null)) + }) +} + +/** + * Attempt to repair common JSON errors from LLMs. + * Returns the parsed object if successful, null if repair failed. 
+ * + * Common errors handled: + * - Missing quote before property name: {"a":"b",c":"d"} -> {"a":"b","c":"d"} + * - Unquoted property names: {command:"ls"} -> {"command":"ls"} + * - Single quotes: {'key':'val'} -> {"key":"val"} + * - Trailing commas: {"a":1,} -> {"a":1} + */ +function tryRepairJson(input: string): Record | null { + // First try parsing as-is + try { + return JSON.parse(input) + } catch { + // Continue to repair attempts + } + + let repaired = input.trim() + + // Fix 1: Add missing quotes around unquoted property names + // {command:"value"} -> {"command":"value"} + repaired = repaired.replace(/([{,]\s*)([a-zA-Z_][a-zA-Z0-9_]*)\s*:/g, '$1"$2":') + + // Fix 2: Fix missing quote before property name after comma + // {"a":"b",c":"d"} -> {"a":"b","c":"d"} + repaired = repaired.replace(/,([a-zA-Z_][a-zA-Z0-9_]*)"/g, ',"$1"') + + // Fix 3: Replace single quotes with double quotes (careful with strings containing apostrophes) + // Only replace if it looks like JSON structure + if (repaired.includes("'")) { + repaired = repaired.replace(/'([^']*)'(\s*[,}\]:])/g, '"$1"$2') + } + + // Fix 4: Remove trailing commas before } or ] + repaired = repaired.replace(/,(\s*[}\]])/g, "$1") + + // Fix 5: Handle unescaped newlines in strings (replace with \n) + // This is a simple heuristic - may not work for all cases + repaired = repaired.replace(/:\s*"([^"]*)\n([^"]*)"/g, ':"$1\\n$2"') + + try { + return JSON.parse(repaired) + } catch { + return null + } +} + +// Export types +export type { SessionState, MessageWithParts } diff --git a/packages/kiana/src/tool/bash.ts b/packages/kiana/src/tool/bash.ts new file mode 100644 index 00000000000..4f3f3d8f24d --- /dev/null +++ b/packages/kiana/src/tool/bash.ts @@ -0,0 +1,252 @@ +import { z } from "zod" +import { spawn } from "node:child_process" +import { defineTool } from "./tool.js" + +const DEFAULT_MAX_OUTPUT_LENGTH = 30_000 +const MAX_OUTPUT_LENGTH = (() => { + const parsed = 
const DEFAULT_MAX_OUTPUT_LENGTH = 30_000
// Output cap; overridable via env, but only a positive integer is accepted.
const MAX_OUTPUT_LENGTH = (() => {
  const parsed = Number(process.env.KIANA_BASH_MAX_OUTPUT_LENGTH)
  return Number.isInteger(parsed) && parsed > 0 ? parsed : DEFAULT_MAX_OUTPUT_LENGTH
})()
const DEFAULT_TIMEOUT = 2 * 60 * 1000 // 2 minutes
const MAX_TIMEOUT = 10 * 60 * 1000 // 10 minutes
// Grace period between SIGTERM and the follow-up SIGKILL in killTree().
const SIGKILL_TIMEOUT_MS = 200

// Prompt text shown to the model — a runtime string, reproduced verbatim.
// NOTE(review): the example block below appears to have lost surrounding
// angle-bracket markup (likely <good-example>/<bad-example> tags) in
// extraction — confirm against the original file.
const DESCRIPTION = `Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.

Before executing the command, please follow these steps:

1. Directory Verification:
 - If the command will create new directories or files, first use the List tool to verify the parent directory exists and is the correct location
 - For example, before running "mkdir foo/bar", first use List to check that "foo" exists and is the intended parent directory

2. Command Execution:
 - Always quote file paths that contain spaces with double quotes (e.g., cd "path with spaces/file.txt")
 - Examples of proper quoting:
 - cd "/Users/name/My Documents" (correct)
 - cd /Users/name/My Documents (incorrect - will fail)
 - python "/path/with spaces/script.py" (correct)
 - python /path/with spaces/script.py (incorrect - will fail)
 - After ensuring proper quoting, execute the command.
 - Capture the output of the command.

Usage notes:
 - The command argument is required.
 - You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). If not specified, commands will timeout after 120000ms (2 minutes).
 - It is very helpful if you write a clear, concise description of what this command does in 5-10 words.
 - If the output exceeds 30000 characters, output will be truncated before being returned to you.
 - VERY IMPORTANT: You MUST avoid using search commands like \`find\` and \`grep\`. Instead use Grep, Glob, or Task to search. You MUST avoid read tools like \`cat\`, \`head\`, \`tail\`, and \`ls\`, and use Read and List to read files.
 - If you _still_ need to run \`grep\`, STOP. ALWAYS USE ripgrep at \`rg\` (or /usr/bin/rg) first, which all users have pre-installed.
 - When issuing multiple commands, use the ';' or '&&' operator to separate them. DO NOT use newlines (newlines are ok in quoted strings).
 - Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of \`cd\`. You may use \`cd\` if the User explicitly requests it.

 pytest /foo/bar/tests


 cd /foo/bar && pytest tests


# Committing changes with git

IMPORTANT: ONLY COMMIT IF THE USER ASKS YOU TO.

If and only if the user asks you to create a new git commit, follow these steps carefully:

1. You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. ALWAYS run the following bash commands in parallel, each using the Bash tool:
 - Run a git status command to see all untracked files.
 - Run a git diff command to see both staged and unstaged changes that will be committed.
 - Run a git log command to see recent commit messages, so that you can follow this repository's commit message style.

2. Analyze all staged changes (both previously staged and newly added) and draft a commit message.

3. Create the commit with a meaningful message.

Important notes:
- Use the git context at the start of this conversation to determine which files are relevant to your commit.
- NEVER update the git config
- DO NOT push to the remote repository
- IMPORTANT: Never use git commands with the -i flag (like git rebase -i or git add -i) since they require interactive input which is not supported.
- If there are no changes to commit (i.e., no untracked files and no modifications), do not create an empty commit

# Creating pull requests
Use the gh command via the Bash tool for ALL GitHub-related tasks including working with issues, pull requests, checks, and releases.`

// Executes a shell command with streaming output, an enforced timeout, and
// cooperative cancellation via ctx.abort. Output is capped at MAX_OUTPUT_LENGTH.
export const bashTool = defineTool("bash", {
  description: DESCRIPTION,
  parameters: z.object({
    command: z.string().describe("The command to execute"),
    timeout: z.number().describe("Optional timeout in milliseconds").optional(),
    description: z
      .string()
      .describe(
        "Clear, concise description of what this command does in 5-10 words. Examples:\nInput: ls\nOutput: Lists files in current directory\n\nInput: git status\nOutput: Shows working tree status\n\nInput: npm install\nOutput: Installs package dependencies\n\nInput: mkdir foo\nOutput: Creates directory 'foo'"
      ),
  }),
  async execute(params, ctx) {
    // Reject negative timeouts up front. Note: 0 is accepted and behaves as a
    // near-immediate timeout (the timer fires after the +100ms grace below).
    if (params.timeout !== undefined && params.timeout < 0) {
      throw new Error(`Invalid timeout value: ${params.timeout}. Timeout must be a positive number.`)
    }
    // Clamp the requested timeout to the 10-minute ceiling.
    const timeout = Math.min(params.timeout ?? DEFAULT_TIMEOUT, MAX_TIMEOUT)

    // Determine shell to use
    const shell = (() => {
      const s = process.env.SHELL
      if (s) {
        // Avoid fish and nu shells
        if (!new Set(["/bin/fish", "/bin/nu", "/usr/bin/fish", "/usr/bin/nu"]).has(s)) {
          return s
        }
      }

      if (process.platform === "darwin") {
        return "/bin/zsh"
      }

      if (process.platform === "win32") {
        return process.env.COMSPEC || "cmd.exe"
      }

      // Try to find bash
      return "/bin/bash"
    })()

    // detached (POSIX only) places the child in its own process group so the
    // whole tree can be signalled at once via process.kill(-pid) in killTree().
    const proc = spawn(params.command, {
      shell,
      cwd: ctx.workingDirectory,
      env: {
        ...process.env,
      },
      stdio: ["ignore", "pipe", "pipe"],
      detached: process.platform !== "win32",
    })

    let output = ""

    // Initialize metadata with empty output
    ctx.metadata({
      metadata: {
        output: "",
        description: params.description,
      },
    })

    // Streams partial output into metadata as it arrives. The guard is checked
    // BEFORE appending, so output may overshoot MAX_OUTPUT_LENGTH by up to one
    // chunk; the final result is sliced back down before returning.
    const append = (chunk: Buffer) => {
      if (output.length <= MAX_OUTPUT_LENGTH) {
        output += chunk.toString()
        ctx.metadata({
          metadata: {
            output,
            description: params.description,
          },
        })
      }
    }

    proc.stdout?.on("data", append)
    proc.stderr?.on("data", append)

    let timedOut = false
    let aborted = false
    let exited = false

    // Best-effort teardown of the child and its descendants:
    //  - Windows: taskkill /t kills the whole tree.
    //  - POSIX: SIGTERM the process group, wait SIGKILL_TIMEOUT_MS, then
    //    SIGKILL; falls back to signalling only the direct child if
    //    group-signalling throws (e.g. the group is already gone).
    const killTree = async () => {
      const pid = proc.pid
      if (!pid || exited) {
        return
      }

      if (process.platform === "win32") {
        await new Promise((resolve) => {
          const killer = spawn("taskkill", ["/pid", String(pid), "/f", "/t"], { stdio: "ignore" })
          killer.once("exit", resolve)
          killer.once("error", () => resolve())
        })
        return
      }

      try {
        // Negative pid targets the detached process group created above.
        process.kill(-pid, "SIGTERM")
        await sleep(SIGKILL_TIMEOUT_MS)
        if (!exited) {
          process.kill(-pid, "SIGKILL")
        }
      } catch {
        proc.kill("SIGTERM")
        await sleep(SIGKILL_TIMEOUT_MS)
        if (!exited) {
          proc.kill("SIGKILL")
        }
      }
    }

    // The request may already be cancelled before the command even starts.
    if (ctx.abort.aborted) {
      aborted = true
      await killTree()
    }

    const abortHandler = () => {
      aborted = true
      void killTree()
    }

    ctx.abort.addEventListener("abort", abortHandler, { once: true })

    // +100ms grace so a timeout of 0 still lets a trivially fast command run.
    const timeoutTimer = setTimeout(() => {
      timedOut = true
      void killTree()
    }, timeout + 100)

    // NOTE(review): likely Promise<void> originally — the type argument
    // appears lost in extraction; confirm against the original file.
    await new Promise((resolve, reject) => {
      const cleanup = () => {
        clearTimeout(timeoutTimer)
        // Redundant with { once: true } when the handler already ran, but
        // needed when exit/error wins the race against the abort event.
        ctx.abort.removeEventListener("abort", abortHandler)
      }

      proc.once("exit", () => {
        exited = true
        cleanup()
        resolve()
      })

      proc.once("error", (error) => {
        exited = true
        cleanup()
        reject(error)
      })
    })

    // Leading "" keeps the length > 1 check below simple and produces a blank
    // line before the notes when they are appended to the output.
    const resultMetadata: string[] = [""]

    if (output.length > MAX_OUTPUT_LENGTH) {
      output = output.slice(0, MAX_OUTPUT_LENGTH)
      resultMetadata.push(`bash tool truncated output as it exceeded ${MAX_OUTPUT_LENGTH} char limit`)
    }

    if (timedOut) {
      resultMetadata.push(`bash tool terminated command after exceeding timeout ${timeout} ms`)
    }

    if (aborted) {
      resultMetadata.push("User aborted the command")
    }

    if (resultMetadata.length > 1) {
      resultMetadata.push("")
      output += "\n\n" + resultMetadata.join("\n")
    }

    return {
      title: params.description,
      // exit is null when the child was killed by a signal (timeout/abort).
      metadata: {
        output,
        exit: proc.exitCode,
        description: params.description,
      },
      output,
    }
  },
})

// NOTE(review): likely Promise<void> — type argument appears lost in extraction.
function sleep(ms: number): Promise {
  return new Promise((resolve) => setTimeout(resolve, ms))
}
balanced context for most queries + - Use lower values for specific questions, higher values for comprehensive documentation + - Supports queries about frameworks, libraries, APIs, and programming concepts + - Examples: 'React useState hook examples', 'Python pandas dataframe filtering', 'Express.js middleware'` + +const API_CONFIG = { + BASE_URL: "https://mcp.exa.ai", + ENDPOINTS: { + CONTEXT: "/mcp", + }, +} as const + +interface McpCodeRequest { + jsonrpc: string + id: number + method: string + params: { + name: string + arguments: { + query: string + tokensNum: number + } + } +} + +interface McpCodeResponse { + jsonrpc: string + result: { + content: Array<{ + type: string + text: string + }> + } +} + +export const codesearchTool = defineTool("codesearch", { + description: DESCRIPTION, + parameters: z.object({ + query: z + .string() + .describe( + "Search query to find relevant context for APIs, Libraries, and SDKs. For example, 'React useState hook examples', 'Python pandas dataframe filtering', 'Express.js middleware', 'Next js partial prerendering configuration'" + ), + tokensNum: z + .number() + .min(1000) + .max(50000) + .default(5000) + .describe( + "Number of tokens to return (1000-50000). Default is 5000 tokens. Adjust this value based on how much context you need - use lower values for focused queries and higher values for comprehensive documentation." + ), + }), + async execute(params, ctx) { + const codeRequest: McpCodeRequest = { + jsonrpc: "2.0", + id: 1, + method: "tools/call", + params: { + name: "get_code_context_exa", + arguments: { + query: params.query, + tokensNum: params.tokensNum || 5000, + }, + }, + } + + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), 30000) + + try { + const headers: Record = { + accept: "application/json, text/event-stream", + "content-type": "application/json", + } + + const signals = ctx.abort ? 
[controller.signal, ctx.abort] : [controller.signal] + const response = await fetch(`${API_CONFIG.BASE_URL}${API_CONFIG.ENDPOINTS.CONTEXT}`, { + method: "POST", + headers, + body: JSON.stringify(codeRequest), + signal: AbortSignal.any(signals), + }) + + clearTimeout(timeoutId) + + if (!response.ok) { + const errorText = await response.text() + throw new Error(`Code search error (${response.status}): ${errorText}`) + } + + const responseText = await response.text() + + // Parse SSE response + const lines = responseText.split("\n") + for (const line of lines) { + if (line.startsWith("data: ")) { + const data: McpCodeResponse = JSON.parse(line.substring(6)) + if (data.result && data.result.content && data.result.content.length > 0) { + return { + output: data.result.content[0].text, + title: `Code search: ${params.query}`, + metadata: {}, + } + } + } + } + + return { + output: + "No code snippets or documentation found. Please try a different query, be more specific about the library or programming concept, or check the spelling of framework names.", + title: `Code search: ${params.query}`, + metadata: {}, + } + } catch (error) { + clearTimeout(timeoutId) + + if (error instanceof Error && error.name === "AbortError") { + throw new Error("Code search request timed out") + } + + throw error + } + }, +}) diff --git a/packages/kiana/src/tool/edit.ts b/packages/kiana/src/tool/edit.ts new file mode 100644 index 00000000000..43de87b5012 --- /dev/null +++ b/packages/kiana/src/tool/edit.ts @@ -0,0 +1,648 @@ +// the approaches in this edit tool are sourced from +// https://github.com/cline/cline/blob/main/evals/diff-edits/diff-apply/diff-06-23-25.ts +// https://github.com/google-gemini/gemini-cli/blob/main/packages/core/src/utils/editCorrector.ts +// https://github.com/cline/cline/blob/main/evals/diff-edits/diff-apply/diff-06-26-25.ts + +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" + +const 
DESCRIPTION = `Performs exact string replacements in files. + +Usage: +- You must use your \`Read\` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file. +- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: spaces + line number + tab. Everything after that tab is the actual file content to match. Never include any part of the line number prefix in the oldString or newString. +- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required. +- Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked. +- The edit will FAIL if \`oldString\` is not found in the file with an error "oldString not found in content". +- The edit will FAIL if \`oldString\` is found multiple times in the file with an error "oldString found multiple times and requires more code context to uniquely identify the intended match". Either provide a larger string with more surrounding context to make it unique or use \`replaceAll\` to change every instance of \`oldString\`. +- Use \`replaceAll\` for replacing and renaming strings across the file. 
This parameter is useful if you want to rename a variable for instance.` + +export const editTool = defineTool("edit", { + description: DESCRIPTION, + parameters: z.object({ + filePath: z.string().describe("The absolute path to the file to modify"), + oldString: z.string().describe("The text to replace"), + newString: z + .string() + .describe("The text to replace it with (must be different from oldString)"), + replaceAll: z + .boolean() + .optional() + .describe("Replace all occurrences of oldString (default false)"), + }), + async execute(params, ctx) { + if (!params.filePath) { + throw new Error("filePath is required") + } + + if (params.oldString === params.newString) { + throw new Error("oldString and newString must be different") + } + + const filePath = path.isAbsolute(params.filePath) + ? params.filePath + : path.join(ctx.workingDirectory, params.filePath) + + let diff = "" + let contentOld = "" + let contentNew = "" + + // Handle creating new file with empty oldString + if (params.oldString === "") { + contentNew = params.newString + diff = createSimpleDiff(filePath, contentOld, contentNew) + + // Ensure parent directory exists + const dir = path.dirname(filePath) + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }) + } + + fs.writeFileSync(filePath, params.newString, "utf-8") + + return { + metadata: { + diagnostics: {}, + diff, + filediff: { + file: filePath, + before: contentOld, + after: contentNew, + additions: countLines(contentNew), + deletions: 0, + }, + }, + title: path.relative(ctx.workingDirectory, filePath), + output: "", + } + } + + // Read existing file + if (!fs.existsSync(filePath)) { + throw new Error(`File ${filePath} not found`) + } + + const stats = fs.statSync(filePath) + if (stats.isDirectory()) { + throw new Error(`Path is a directory, not a file: ${filePath}`) + } + + contentOld = fs.readFileSync(filePath, "utf-8") + contentNew = replace(contentOld, params.oldString, params.newString, params.replaceAll) + + diff = 
createSimpleDiff(filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew)) + + fs.writeFileSync(filePath, contentNew, "utf-8") + + // Re-read to get actual content + contentNew = fs.readFileSync(filePath, "utf-8") + diff = createSimpleDiff(filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew)) + + // Calculate additions/deletions + const oldLines = contentOld.split("\n") + const newLines = contentNew.split("\n") + let additions = 0 + let deletions = 0 + + // Simple diff counting + const oldSet = new Set(oldLines) + const newSet = new Set(newLines) + + for (const line of newLines) { + if (!oldSet.has(line)) additions++ + } + for (const line of oldLines) { + if (!newSet.has(line)) deletions++ + } + + return { + metadata: { + diagnostics: {}, + diff, + filediff: { + file: filePath, + before: contentOld, + after: contentNew, + additions, + deletions, + }, + }, + title: path.relative(ctx.workingDirectory, filePath), + output: "", + } + }, +}) + +function normalizeLineEndings(text: string): string { + return text.replaceAll("\r\n", "\n") +} + +function countLines(text: string): number { + return text.split("\n").length +} + +function createSimpleDiff(filePath: string, oldContent: string, newContent: string): string { + const oldLines = oldContent.split("\n") + const newLines = newContent.split("\n") + + const lines: string[] = [ + `--- ${filePath}`, + `+++ ${filePath}`, + ] + + // Simple unified diff format + let i = 0 + let j = 0 + + while (i < oldLines.length || j < newLines.length) { + if (i >= oldLines.length) { + lines.push(`+${newLines[j]}`) + j++ + } else if (j >= newLines.length) { + lines.push(`-${oldLines[i]}`) + i++ + } else if (oldLines[i] === newLines[j]) { + lines.push(` ${oldLines[i]}`) + i++ + j++ + } else { + // Find next matching line + let foundMatch = false + for (let lookAhead = 1; lookAhead < 5; lookAhead++) { + if (i + lookAhead < oldLines.length && oldLines[i + lookAhead] === newLines[j]) { + // Old lines 
were deleted + for (let k = 0; k < lookAhead; k++) { + lines.push(`-${oldLines[i + k]}`) + } + i += lookAhead + foundMatch = true + break + } + if (j + lookAhead < newLines.length && oldLines[i] === newLines[j + lookAhead]) { + // New lines were added + for (let k = 0; k < lookAhead; k++) { + lines.push(`+${newLines[j + k]}`) + } + j += lookAhead + foundMatch = true + break + } + } + + if (!foundMatch) { + lines.push(`-${oldLines[i]}`) + lines.push(`+${newLines[j]}`) + i++ + j++ + } + } + } + + return lines.join("\n") +} + +// Similarity thresholds for block anchor fallback matching +const SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0 +const MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3 + +/** + * Levenshtein distance algorithm implementation + */ +function levenshtein(a: string, b: string): number { + if (a === "" || b === "") { + return Math.max(a.length, b.length) + } + const matrix = Array.from({ length: a.length + 1 }, (_, i) => + Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0)) + ) + + for (let i = 1; i <= a.length; i++) { + for (let j = 1; j <= b.length; j++) { + const cost = a[i - 1] === b[j - 1] ? 
0 : 1 + matrix[i][j] = Math.min( + matrix[i - 1][j] + 1, + matrix[i][j - 1] + 1, + matrix[i - 1][j - 1] + cost + ) + } + } + return matrix[a.length][b.length] +} + +export type Replacer = (content: string, find: string) => Generator + +export const SimpleReplacer: Replacer = function* (_content, find) { + yield find +} + +export const LineTrimmedReplacer: Replacer = function* (content, find) { + const originalLines = content.split("\n") + const searchLines = find.split("\n") + + if (searchLines[searchLines.length - 1] === "") { + searchLines.pop() + } + + for (let i = 0; i <= originalLines.length - searchLines.length; i++) { + let matches = true + + for (let j = 0; j < searchLines.length; j++) { + const originalTrimmed = originalLines[i + j].trim() + const searchTrimmed = searchLines[j].trim() + + if (originalTrimmed !== searchTrimmed) { + matches = false + break + } + } + + if (matches) { + let matchStartIndex = 0 + for (let k = 0; k < i; k++) { + matchStartIndex += originalLines[k].length + 1 + } + + let matchEndIndex = matchStartIndex + for (let k = 0; k < searchLines.length; k++) { + matchEndIndex += originalLines[i + k].length + if (k < searchLines.length - 1) { + matchEndIndex += 1 + } + } + + yield content.substring(matchStartIndex, matchEndIndex) + } + } +} + +export const BlockAnchorReplacer: Replacer = function* (content, find) { + const originalLines = content.split("\n") + const searchLines = find.split("\n") + + if (searchLines.length < 3) { + return + } + + if (searchLines[searchLines.length - 1] === "") { + searchLines.pop() + } + + const firstLineSearch = searchLines[0].trim() + const lastLineSearch = searchLines[searchLines.length - 1].trim() + const searchBlockSize = searchLines.length + + const candidates: Array<{ startLine: number; endLine: number }> = [] + for (let i = 0; i < originalLines.length; i++) { + if (originalLines[i].trim() !== firstLineSearch) { + continue + } + + for (let j = i + 2; j < originalLines.length; j++) { + if 
(originalLines[j].trim() === lastLineSearch) { + candidates.push({ startLine: i, endLine: j }) + break + } + } + } + + if (candidates.length === 0) { + return + } + + if (candidates.length === 1) { + const { startLine, endLine } = candidates[0] + const actualBlockSize = endLine - startLine + 1 + + let similarity = 0 + let linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2) + + if (linesToCheck > 0) { + for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) { + const originalLine = originalLines[startLine + j].trim() + const searchLine = searchLines[j].trim() + const maxLen = Math.max(originalLine.length, searchLine.length) + if (maxLen === 0) { + continue + } + const distance = levenshtein(originalLine, searchLine) + similarity += (1 - distance / maxLen) / linesToCheck + + if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) { + break + } + } + } else { + similarity = 1.0 + } + + if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) { + let matchStartIndex = 0 + for (let k = 0; k < startLine; k++) { + matchStartIndex += originalLines[k].length + 1 + } + let matchEndIndex = matchStartIndex + for (let k = startLine; k <= endLine; k++) { + matchEndIndex += originalLines[k].length + if (k < endLine) { + matchEndIndex += 1 + } + } + yield content.substring(matchStartIndex, matchEndIndex) + } + return + } + + let bestMatch: { startLine: number; endLine: number } | null = null + let maxSimilarity = -1 + + for (const candidate of candidates) { + const { startLine, endLine } = candidate + const actualBlockSize = endLine - startLine + 1 + + let similarity = 0 + let linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2) + + if (linesToCheck > 0) { + for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) { + const originalLine = originalLines[startLine + j].trim() + const searchLine = searchLines[j].trim() + const maxLen = Math.max(originalLine.length, searchLine.length) + if (maxLen === 0) { + continue + } + 
const distance = levenshtein(originalLine, searchLine) + similarity += 1 - distance / maxLen + } + similarity /= linesToCheck + } else { + similarity = 1.0 + } + + if (similarity > maxSimilarity) { + maxSimilarity = similarity + bestMatch = candidate + } + } + + if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) { + const { startLine, endLine } = bestMatch + let matchStartIndex = 0 + for (let k = 0; k < startLine; k++) { + matchStartIndex += originalLines[k].length + 1 + } + let matchEndIndex = matchStartIndex + for (let k = startLine; k <= endLine; k++) { + matchEndIndex += originalLines[k].length + if (k < endLine) { + matchEndIndex += 1 + } + } + yield content.substring(matchStartIndex, matchEndIndex) + } +} + +export const WhitespaceNormalizedReplacer: Replacer = function* (content, find) { + const normalizeWhitespace = (text: string) => text.replace(/\s+/g, " ").trim() + const normalizedFind = normalizeWhitespace(find) + + const lines = content.split("\n") + for (let i = 0; i < lines.length; i++) { + const line = lines[i] + if (normalizeWhitespace(line) === normalizedFind) { + yield line + } else { + const normalizedLine = normalizeWhitespace(line) + if (normalizedLine.includes(normalizedFind)) { + const words = find.trim().split(/\s+/) + if (words.length > 0) { + const pattern = words.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")).join("\\s+") + try { + const regex = new RegExp(pattern) + const match = line.match(regex) + if (match) { + yield match[0] + } + } catch { + // Invalid regex pattern, skip + } + } + } + } + } + + const findLines = find.split("\n") + if (findLines.length > 1) { + for (let i = 0; i <= lines.length - findLines.length; i++) { + const block = lines.slice(i, i + findLines.length) + if (normalizeWhitespace(block.join("\n")) === normalizedFind) { + yield block.join("\n") + } + } + } +} + +export const IndentationFlexibleReplacer: Replacer = function* (content, find) { + const removeIndentation = (text: 
string) => { + const lines = text.split("\n") + const nonEmptyLines = lines.filter((line) => line.trim().length > 0) + if (nonEmptyLines.length === 0) return text + + const minIndent = Math.min( + ...nonEmptyLines.map((line) => { + const match = line.match(/^(\s*)/) + return match ? match[1].length : 0 + }) + ) + + return lines.map((line) => (line.trim().length === 0 ? line : line.slice(minIndent))).join("\n") + } + + const normalizedFind = removeIndentation(find) + const contentLines = content.split("\n") + const findLines = find.split("\n") + + for (let i = 0; i <= contentLines.length - findLines.length; i++) { + const block = contentLines.slice(i, i + findLines.length).join("\n") + if (removeIndentation(block) === normalizedFind) { + yield block + } + } +} + +export const EscapeNormalizedReplacer: Replacer = function* (content, find) { + const unescapeString = (str: string): string => { + return str.replace(/\\(n|t|r|'|"|`|\\|\n|\$)/g, (match, capturedChar) => { + switch (capturedChar) { + case "n": + return "\n" + case "t": + return "\t" + case "r": + return "\r" + case "'": + return "'" + case '"': + return '"' + case "`": + return "`" + case "\\": + return "\\" + case "\n": + return "\n" + case "$": + return "$" + default: + return match + } + }) + } + + const unescapedFind = unescapeString(find) + + if (content.includes(unescapedFind)) { + yield unescapedFind + } + + const lines = content.split("\n") + const findLines = unescapedFind.split("\n") + + for (let i = 0; i <= lines.length - findLines.length; i++) { + const block = lines.slice(i, i + findLines.length).join("\n") + const unescapedBlock = unescapeString(block) + + if (unescapedBlock === unescapedFind) { + yield block + } + } +} + +export const MultiOccurrenceReplacer: Replacer = function* (content, find) { + let startIndex = 0 + + while (true) { + const index = content.indexOf(find, startIndex) + if (index === -1) break + + yield find + startIndex = index + find.length + } +} + +export const 
TrimmedBoundaryReplacer: Replacer = function* (content, find) { + const trimmedFind = find.trim() + + if (trimmedFind === find) { + return + } + + if (content.includes(trimmedFind)) { + yield trimmedFind + } + + const lines = content.split("\n") + const findLines = find.split("\n") + + for (let i = 0; i <= lines.length - findLines.length; i++) { + const block = lines.slice(i, i + findLines.length).join("\n") + + if (block.trim() === trimmedFind) { + yield block + } + } +} + +export const ContextAwareReplacer: Replacer = function* (content, find) { + const findLines = find.split("\n") + if (findLines.length < 3) { + return + } + + if (findLines[findLines.length - 1] === "") { + findLines.pop() + } + + const contentLines = content.split("\n") + + const firstLine = findLines[0].trim() + const lastLine = findLines[findLines.length - 1].trim() + + for (let i = 0; i < contentLines.length; i++) { + if (contentLines[i].trim() !== firstLine) continue + + for (let j = i + 2; j < contentLines.length; j++) { + if (contentLines[j].trim() === lastLine) { + const blockLines = contentLines.slice(i, j + 1) + const block = blockLines.join("\n") + + if (blockLines.length === findLines.length) { + let matchingLines = 0 + let totalNonEmptyLines = 0 + + for (let k = 1; k < blockLines.length - 1; k++) { + const blockLine = blockLines[k].trim() + const findLine = findLines[k].trim() + + if (blockLine.length > 0 || findLine.length > 0) { + totalNonEmptyLines++ + if (blockLine === findLine) { + matchingLines++ + } + } + } + + if (totalNonEmptyLines === 0 || matchingLines / totalNonEmptyLines >= 0.5) { + yield block + break + } + } + break + } + } + } +} + +export function replace( + content: string, + oldString: string, + newString: string, + replaceAll = false +): string { + if (oldString === newString) { + throw new Error("oldString and newString must be different") + } + + let notFound = true + + for (const replacer of [ + SimpleReplacer, + LineTrimmedReplacer, + BlockAnchorReplacer, + 
WhitespaceNormalizedReplacer, + IndentationFlexibleReplacer, + EscapeNormalizedReplacer, + TrimmedBoundaryReplacer, + ContextAwareReplacer, + MultiOccurrenceReplacer, + ]) { + for (const search of replacer(content, oldString)) { + const index = content.indexOf(search) + if (index === -1) continue + notFound = false + if (replaceAll) { + return content.replaceAll(search, newString) + } + const lastIndex = content.lastIndexOf(search) + if (index !== lastIndex) continue + return content.substring(0, index) + newString + content.substring(index + search.length) + } + } + + if (notFound) { + throw new Error("oldString not found in content") + } + throw new Error( + "Found multiple matches for oldString. Provide more surrounding lines in oldString to identify the correct match." + ) +} diff --git a/packages/kiana/src/tool/glob.ts b/packages/kiana/src/tool/glob.ts new file mode 100644 index 00000000000..f65f85d9f99 --- /dev/null +++ b/packages/kiana/src/tool/glob.ts @@ -0,0 +1,78 @@ +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" +import { listFiles } from "../util/ripgrep.js" + +const DESCRIPTION = `- Fast file pattern matching tool that works with any codebase size +- Supports glob patterns like "**/*.js" or "src/**/*.ts" +- Returns matching file paths sorted by modification time +- Use this tool when you need to find files by name patterns +- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Task tool instead +- You have the capability to call multiple tools in a single response. 
It is always better to speculatively perform multiple searches as a batch that are potentially useful.` + +export const globTool = defineTool("glob", { + description: DESCRIPTION, + parameters: z.object({ + pattern: z.string().describe("The glob pattern to match files against"), + path: z + .string() + .optional() + .describe( + `The directory to search in. If not specified, the current working directory will be used. IMPORTANT: Omit this field to use the default directory. DO NOT enter "undefined" or "null" - simply omit it for the default behavior. Must be a valid directory path if provided.` + ), + }), + async execute(params, ctx) { + let search = params.path ?? ctx.workingDirectory + search = path.isAbsolute(search) ? search : path.resolve(ctx.workingDirectory, search) + + const limit = 100 + const files: Array<{ path: string; mtime: number }> = [] + let truncated = false + + for await (const file of listFiles({ + cwd: search, + glob: [params.pattern], + })) { + if (files.length >= limit) { + truncated = true + break + } + const full = path.resolve(search, file) + let mtime = 0 + try { + const stats = fs.statSync(full) + mtime = stats.mtime.getTime() + } catch { + // File might have been deleted + } + files.push({ + path: full, + mtime, + }) + } + + // Sort by modification time (newest first) + files.sort((a, b) => b.mtime - a.mtime) + + const output: string[] = [] + if (files.length === 0) { + output.push("No files found") + } else { + output.push(...files.map((f) => f.path)) + if (truncated) { + output.push("") + output.push("(Results are truncated. 
Consider using a more specific path or pattern.)") + } + } + + return { + title: path.relative(ctx.workingDirectory, search), + metadata: { + count: files.length, + truncated, + }, + output: output.join("\n"), + } + }, +}) diff --git a/packages/kiana/src/tool/grep.ts b/packages/kiana/src/tool/grep.ts new file mode 100644 index 00000000000..bec2583e903 --- /dev/null +++ b/packages/kiana/src/tool/grep.ts @@ -0,0 +1,113 @@ +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" +import { searchContent } from "../util/ripgrep.js" + +const MAX_LINE_LENGTH = 2000 + +const DESCRIPTION = `- Fast content search tool that works with any codebase size +- Searches file contents using regular expressions +- Supports full regex syntax (eg. "log.*Error", "function\\s+\\w+", etc.) +- Filter files by pattern with the include parameter (eg. "*.js", "*.{ts,tsx}") +- Returns file paths with at least one match sorted by modification time +- Use this tool when you need to find files containing specific patterns +- If you need to identify/count the number of matches within files, use the Bash tool with \`rg\` (ripgrep) directly. Do NOT use \`grep\`. +- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Task tool instead` + +export const grepTool = defineTool("grep", { + description: DESCRIPTION, + parameters: z.object({ + pattern: z.string().describe("The regex pattern to search for in file contents"), + path: z + .string() + .optional() + .describe("The directory to search in. Defaults to the current working directory."), + include: z + .string() + .optional() + .describe('File pattern to include in the search (e.g. 
"*.js", "*.{ts,tsx}")'), + }), + async execute(params, ctx) { + if (!params.pattern) { + throw new Error("pattern is required") + } + + const searchPath = params.path || ctx.workingDirectory + + const rawMatches = await searchContent({ + cwd: searchPath, + pattern: params.pattern, + glob: params.include, + }) + + // Add modification time to matches + const matches: Array<{ + path: string + modTime: number + lineNum: number + lineText: string + }> = [] + + for (const match of rawMatches) { + let modTime = 0 + try { + const stats = fs.statSync(match.path) + modTime = stats.mtime.getTime() + } catch { + // File might have been deleted + } + matches.push({ + ...match, + modTime, + }) + } + + // Sort by modification time (newest first) + matches.sort((a, b) => b.modTime - a.modTime) + + const limit = 100 + const truncated = matches.length > limit + const finalMatches = truncated ? matches.slice(0, limit) : matches + + if (finalMatches.length === 0) { + return { + title: params.pattern, + metadata: { matches: 0, truncated: false }, + output: "No files found", + } + } + + const outputLines = [`Found ${finalMatches.length} matches`] + + let currentFile = "" + for (const match of finalMatches) { + if (currentFile !== match.path) { + if (currentFile !== "") { + outputLines.push("") + } + currentFile = match.path + outputLines.push(`${match.path}:`) + } + const truncatedLineText = + match.lineText.length > MAX_LINE_LENGTH + ? match.lineText.substring(0, MAX_LINE_LENGTH) + "..." + : match.lineText + outputLines.push(` Line ${match.lineNum}: ${truncatedLineText}`) + } + + if (truncated) { + outputLines.push("") + outputLines.push("(Results are truncated. 
Consider using a more specific path or pattern.)") + } + + return { + title: params.pattern, + metadata: { + matches: finalMatches.length, + truncated, + }, + output: outputLines.join("\n"), + } + }, +}) diff --git a/packages/kiana/src/tool/index.ts b/packages/kiana/src/tool/index.ts new file mode 100644 index 00000000000..439002c29c8 --- /dev/null +++ b/packages/kiana/src/tool/index.ts @@ -0,0 +1,82 @@ +import { Tool } from "./tool.js" +import { bashTool } from "./bash.js" +import { readTool } from "./read.js" +import { writeTool } from "./write.js" +import { editTool } from "./edit.js" +import { globTool } from "./glob.js" +import { grepTool } from "./grep.js" +import { listTool } from "./list.js" +import { webfetchTool } from "./webfetch.js" +import { websearchTool } from "./websearch.js" +import { codesearchTool } from "./codesearch.js" +import { todoWriteTool, todoReadTool } from "./todo.js" +import { taskTool, setSubagentExecutor } from "./task.js" +import { invalidTool } from "./invalid.js" + +// Export individual tools +export { bashTool } from "./bash.js" +export { readTool } from "./read.js" +export { writeTool } from "./write.js" +export { editTool } from "./edit.js" +export { globTool } from "./glob.js" +export { grepTool } from "./grep.js" +export { listTool } from "./list.js" +export { webfetchTool } from "./webfetch.js" +export { websearchTool } from "./websearch.js" +export { codesearchTool } from "./codesearch.js" +export { todoWriteTool, todoReadTool, getTodos, setTodos } from "./todo.js" +export { taskTool, setSubagentExecutor } from "./task.js" +export { invalidTool } from "./invalid.js" + +// Export tool types +export type { Tool, ToolContext, ToolResult } from "./tool.js" +export { defineTool } from "./tool.js" + +// All available tools +export const allTools: Record> = { + bash: bashTool, + read: readTool, + write: writeTool, + edit: editTool, + glob: globTool, + grep: grepTool, + list: listTool, + webfetch: webfetchTool, + websearch: 
websearchTool, + codesearch: codesearchTool, + todowrite: todoWriteTool, + todoread: todoReadTool, + task: taskTool, + invalid: invalidTool, +} + +// Get tools based on config (null = all tools) +export function getTools(enabled?: string[] | null): Record<string, Tool> { + if (!enabled) { + return { ...allTools } + } + + const result: Record<string, Tool> = {} + for (const name of enabled) { + const tool = allTools[name.toLowerCase()] + if (tool) { + result[name.toLowerCase()] = tool + } + } + return result +} + +// Get tool info for API responses +export interface ToolInfo { + name: string + description: string + parameters: Record<string, unknown> +} + +export function getToolInfo(tools: Record<string, Tool>): ToolInfo[] { + return Object.entries(tools).map(([name, tool]) => ({ + name, + description: tool.description, + parameters: tool.parameters._def, + })) +} diff --git a/packages/kiana/src/tool/invalid.ts b/packages/kiana/src/tool/invalid.ts new file mode 100644 index 00000000000..2f0329f08c4 --- /dev/null +++ b/packages/kiana/src/tool/invalid.ts @@ -0,0 +1,21 @@ +import { z } from "zod" +import { defineTool } from "./tool.js" + +/** + * Invalid tool - receives redirected parse errors from experimental_repairToolCall. + * This allows the LLM to see the error as a normal tool result and retry with corrected arguments.
+ */ +export const invalidTool = defineTool("invalid", { + description: "Do not use", + parameters: z.object({ + tool: z.string(), + error: z.string(), + }), + async execute(params) { + return { + title: "Invalid Tool", + output: `The arguments provided to the tool are invalid: ${params.error}`, + metadata: {}, + } + }, +}) diff --git a/packages/kiana/src/tool/list.ts b/packages/kiana/src/tool/list.ts new file mode 100644 index 00000000000..0860a3b4d94 --- /dev/null +++ b/packages/kiana/src/tool/list.ts @@ -0,0 +1,115 @@ +import { z } from "zod" +import * as path from "node:path" +import { defineTool } from "./tool.js" +import { listFiles } from "../util/ripgrep.js" + +const IGNORE_PATTERNS = [ + "node_modules/", + "__pycache__/", + ".git/", + "dist/", + "build/", + "target/", + "vendor/", + "bin/", + "obj/", + ".idea/", + ".vscode/", + ".zig-cache/", + "zig-out", + ".coverage", + "coverage/", + "tmp/", + "temp/", + ".cache/", + "cache/", + "logs/", + ".venv/", + "venv/", + "env/", +] + +const LIMIT = 100 + +const DESCRIPTION = `Lists files and directories in a given path. The path parameter must be absolute; omit it to use the current workspace directory. You can optionally provide an array of glob patterns to ignore with the ignore parameter. 
You should generally prefer the Glob and Grep tools, if you know which directories to search.` + +export const listTool = defineTool("list", { + description: DESCRIPTION, + parameters: z.object({ + path: z + .string() + .describe("The absolute path to the directory to list (must be absolute, not relative)") + .optional(), + ignore: z.array(z.string()).describe("List of glob patterns to ignore").optional(), + }), + async execute(params, ctx) { + const searchPath = path.resolve(ctx.workingDirectory, params.path || ".") + + const ignoreGlobs = IGNORE_PATTERNS.map((p) => `!${p}*`).concat( + params.ignore?.map((p) => `!${p}`) || [] + ) + + const files: string[] = [] + for await (const file of listFiles({ cwd: searchPath, glob: ignoreGlobs })) { + files.push(file) + if (files.length >= LIMIT) break + } + + // Build directory structure + const dirs = new Set() + const filesByDir = new Map() + + for (const file of files) { + const dir = path.dirname(file) + const parts = dir === "." ? [] : dir.split("/") + + // Add all parent directories + for (let i = 0; i <= parts.length; i++) { + const dirPath = i === 0 ? "." 
: parts.slice(0, i).join("/") + dirs.add(dirPath) + } + + // Add file to its directory + if (!filesByDir.has(dir)) filesByDir.set(dir, []) + filesByDir.get(dir)!.push(path.basename(file)) + } + + function renderDir(dirPath: string, depth: number): string { + const indent = " ".repeat(depth) + let output = "" + + if (depth > 0) { + output += `${indent}${path.basename(dirPath)}/\n` + } + + const childIndent = " ".repeat(depth + 1) + const children = Array.from(dirs) + .filter((d) => path.dirname(d) === dirPath && d !== dirPath) + .sort() + + // Render subdirectories first + for (const child of children) { + output += renderDir(child, depth + 1) + } + + // Render files + const dirFiles = filesByDir.get(dirPath) || [] + for (const file of dirFiles.sort()) { + output += `${childIndent}${file}\n` + } + + return output + } + + const output = `${searchPath}/\n` + renderDir(".", 0) + + return { + title: path.relative(ctx.workingDirectory, searchPath), + metadata: { + count: files.length, + truncated: files.length >= LIMIT, + }, + output, + } + }, +}) diff --git a/packages/kiana/src/tool/read.ts b/packages/kiana/src/tool/read.ts new file mode 100644 index 00000000000..c838df640e0 --- /dev/null +++ b/packages/kiana/src/tool/read.ts @@ -0,0 +1,259 @@ +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" + +const DEFAULT_READ_LIMIT = 2000 +const MAX_LINE_LENGTH = 2000 + +const DESCRIPTION = `Reads a file from the local filesystem. You can access any file directly by using this tool. +Assume this tool is able to read all files on the machine. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned. 
+ +Usage: +- The filePath parameter must be an absolute path, not a relative path +- By default, it reads up to 2000 lines starting from the beginning of the file +- You can optionally specify a line offset and limit (especially handy for long files), but it's recommended to read the whole file by not providing these parameters +- Any lines longer than 2000 characters will be truncated +- Results are returned using cat -n format, with line numbers starting at 1 +- You have the capability to call multiple tools in a single response. It is always better to speculatively read multiple files as a batch that are potentially useful. +- If you read a file that exists but has empty contents you will receive a system reminder warning in place of file contents. +- You can read image files using this tool.` + +export const readTool = defineTool("read", { + description: DESCRIPTION, + parameters: z.object({ + filePath: z.string().describe("The path to the file to read"), + offset: z.coerce + .number() + .describe("The line number to start reading from (0-based)") + .optional(), + limit: z.coerce + .number() + .describe("The number of lines to read (defaults to 2000)") + .optional(), + }), + async execute(params, ctx) { + let filepath = params.filePath + if (!path.isAbsolute(filepath)) { + filepath = path.join(ctx.workingDirectory, filepath) + } + const title = path.relative(ctx.workingDirectory, filepath) + + // Block .env files (except samples/examples) + const whitelist = [".env.sample", ".example"] + const block = (() => { + if (whitelist.some((w) => filepath.endsWith(w))) return false + if (filepath.includes(".env")) return true + return false + })() + + if (block) { + throw new Error( + `The user has blocked you from reading ${filepath}, DO NOT make further attempts to read it` + ) + } + + // Check file exists + if (!fs.existsSync(filepath)) { + const dir = path.dirname(filepath) + const base = path.basename(filepath) + + try { + const dirEntries = fs.readdirSync(dir) + 
const suggestions = dirEntries + .filter( + (entry) => + entry.toLowerCase().includes(base.toLowerCase()) || + base.toLowerCase().includes(entry.toLowerCase()) + ) + .map((entry) => path.join(dir, entry)) + .slice(0, 3) + + if (suggestions.length > 0) { + throw new Error( + `File not found: ${filepath}\n\nDid you mean one of these?\n${suggestions.join("\n")}` + ) + } + } catch (e) { + if ((e as Error).message.startsWith("File not found:")) throw e + // Directory doesn't exist, fall through + } + + throw new Error(`File not found: ${filepath}`) + } + + const stats = fs.statSync(filepath) + if (stats.isDirectory()) { + throw new Error(`Path is a directory, not a file: ${filepath}`) + } + + // Check if image + const imageType = isImageFile(filepath) + if (imageType) { + const content = fs.readFileSync(filepath) + const mime = getMimeType(filepath) + const msg = "Image read successfully" + return { + title, + output: msg, + metadata: { preview: msg }, + attachments: [ + { + id: `part_${Date.now()}`, + sessionID: ctx.sessionID, + messageID: ctx.messageID, + type: "file" as const, + mime, + url: `data:${mime};base64,${content.toString("base64")}`, + }, + ], + } + } + + // Check if binary + if (isBinaryFile(filepath, stats.size)) { + throw new Error(`Cannot read binary file: ${filepath}`) + } + + // Read text file + const content = fs.readFileSync(filepath, "utf-8") + const lines = content.split("\n") + const limit = params.limit ?? DEFAULT_READ_LIMIT + const offset = params.offset || 0 + + const raw = lines.slice(offset, offset + limit).map((line) => { + return line.length > MAX_LINE_LENGTH + ? line.substring(0, MAX_LINE_LENGTH) + "..." 
+ : line + }) + + // Format with line numbers (matching cat -n format: padded line number + tab) + const numbered = raw.map((line, index) => { + return `${(index + offset + 1).toString().padStart(5, " ")}\t${line}` + }) + + const preview = raw.slice(0, 20).join("\n") + + let output = "\n" + output += numbered.join("\n") + + const totalLines = lines.length + const lastReadLine = offset + raw.length + const hasMoreLines = totalLines > lastReadLine + + if (hasMoreLines) { + output += `\n\n(File has more lines. Use 'offset' parameter to read beyond line ${lastReadLine})` + } else { + output += `\n\n(End of file - total ${totalLines} lines)` + } + output += "\n" + + return { + title, + output, + metadata: { preview }, + } + }, +}) + +function isImageFile(filePath: string): string | false { + const ext = path.extname(filePath).toLowerCase() + switch (ext) { + case ".jpg": + case ".jpeg": + return "JPEG" + case ".png": + return "PNG" + case ".gif": + return "GIF" + case ".bmp": + return "BMP" + case ".webp": + return "WebP" + default: + return false + } +} + +function getMimeType(filePath: string): string { + const ext = path.extname(filePath).toLowerCase() + switch (ext) { + case ".jpg": + case ".jpeg": + return "image/jpeg" + case ".png": + return "image/png" + case ".gif": + return "image/gif" + case ".bmp": + return "image/bmp" + case ".webp": + return "image/webp" + default: + return "application/octet-stream" + } +} + +function isBinaryFile(filepath: string, fileSize: number): boolean { + const ext = path.extname(filepath).toLowerCase() + + // Known binary extensions + const binaryExtensions = new Set([ + ".zip", + ".tar", + ".gz", + ".exe", + ".dll", + ".so", + ".class", + ".jar", + ".war", + ".7z", + ".doc", + ".docx", + ".xls", + ".xlsx", + ".ppt", + ".pptx", + ".odt", + ".ods", + ".odp", + ".bin", + ".dat", + ".obj", + ".o", + ".a", + ".lib", + ".wasm", + ".pyc", + ".pyo", + ]) + + if (binaryExtensions.has(ext)) { + return true + } + + if (fileSize === 0) return 
false + + // Read first 4KB and check for binary content + const bufferSize = Math.min(4096, fileSize) + const buffer = Buffer.alloc(bufferSize) + const fd = fs.openSync(filepath, "r") + const bytesRead = fs.readSync(fd, buffer, 0, bufferSize, 0) + fs.closeSync(fd) + + if (bytesRead === 0) return false + + let nonPrintableCount = 0 + for (let i = 0; i < bytesRead; i++) { + // Null byte is definite binary indicator + if (buffer[i] === 0) return true + // Count non-printable characters (excluding common whitespace) + if (buffer[i] < 9 || (buffer[i] > 13 && buffer[i] < 32)) { + nonPrintableCount++ + } + } + + // If >30% non-printable, consider binary + return nonPrintableCount / bytesRead > 0.3 +} diff --git a/packages/kiana/src/tool/task.ts b/packages/kiana/src/tool/task.ts new file mode 100644 index 00000000000..b38d91ce6d7 --- /dev/null +++ b/packages/kiana/src/tool/task.ts @@ -0,0 +1,164 @@ +import { z } from "zod" +import { defineTool, ToolContext } from "./tool.js" + +const DESCRIPTION = `Launch a new agent to handle complex, multi-step tasks autonomously. + +Available agent types: +- general-purpose: General-purpose agent for researching complex questions, searching for code, and executing multi-step tasks. +- explore: Fast agent specialized for exploring codebases. Use this when you need to quickly find files by patterns, search code for keywords, or answer questions about the codebase. + +When using the Task tool, you must specify a subagent_type parameter to select which agent type to use. 
+ +When to use the Task tool: +- When you are instructed to execute custom slash commands +- When exploring large codebases +- When tasks require multiple rounds of searching and analysis + +When NOT to use the Task tool: +- If you want to read a specific file path, use the Read or Glob tool instead +- If you are searching for a specific class definition, use the Glob tool instead +- If you are searching for code within a specific file or set of 2-3 files, use the Read tool instead + +Usage notes: +1. Launch multiple agents concurrently whenever possible for maximum performance +2. When the agent is done, it will return a single message back to you +3. Each agent invocation is stateless - provide a highly detailed task description +4. The agent's outputs should generally be trusted +5. Clearly tell the agent whether you expect it to write code or just to do research` + +// Agent configuration for subagents +interface AgentConfig { + name: string + description: string + systemPrompt: string +} + +const AGENTS: Record = { + "general-purpose": { + name: "general-purpose", + description: "General-purpose agent for researching complex questions and executing multi-step tasks", + systemPrompt: `You are a helpful coding assistant running as a subagent. Complete the task autonomously and return a concise summary of your findings or actions. + +When searching for code or files: +- Use Glob to find files by patterns +- Use Grep to search file contents +- Use Read to examine specific files + +Be thorough but efficient. Return only the essential information needed to answer the question or complete the task.`, + }, + explore: { + name: "explore", + description: "Fast agent specialized for exploring codebases", + systemPrompt: `You are a fast code exploration agent. Your job is to quickly find files and code patterns in the codebase. 
+ +Use these tools efficiently: +- Glob: Find files by pattern (e.g., "**/*.ts", "src/**/*.tsx") +- Grep: Search file contents for keywords or patterns +- Read: Examine specific files when needed + +Be quick and focused. Return a concise summary of what you found.`, + }, +} + +// Subagent session executor type - will be injected by the session manager +export type SubagentExecutor = (params: { + sessionID: string + parentSessionID: string + prompt: string + agentConfig: AgentConfig + agentType: string + tools: Record + depth: number // nesting level (1 = direct subagent, 2 = subagent of subagent) +}) => Promise<{ + output: string + sessionID: string +}> + +// This will be set by the session manager +let subagentExecutor: SubagentExecutor | null = null + +export function setSubagentExecutor(executor: SubagentExecutor) { + subagentExecutor = executor +} + +export const taskTool = defineTool("task", { + description: DESCRIPTION, + parameters: z.object({ + description: z.string().describe("A short (3-5 words) description of the task"), + prompt: z.string().describe("The task for the agent to perform"), + subagent_type: z.string().describe("The type of specialized agent to use for this task"), + session_id: z.string().describe("Existing Task session to continue").optional(), + }), + async execute(params, ctx) { + const agent = AGENTS[params.subagent_type] + if (!agent) { + const availableAgents = Object.keys(AGENTS).join(", ") + throw new Error( + `Unknown agent type: ${params.subagent_type}. 
Available agents: ${availableAgents}` + ) + } + + // Update metadata to show task in progress + ctx.metadata({ + title: params.description, + metadata: { + agentType: params.subagent_type, + status: "running", + }, + }) + + if (!subagentExecutor) { + // Fallback: Return a message indicating subagent execution is not available + // This happens when the task tool is used without a proper session manager + return { + title: params.description, + metadata: { + agentType: params.subagent_type, + status: "skipped", + }, + output: `Subagent execution is not available in this context. Task: ${params.prompt}`, + } + } + + try { + const result = await subagentExecutor({ + sessionID: params.session_id || `task-${Date.now()}`, + parentSessionID: ctx.sessionID, + prompt: params.prompt, + agentConfig: agent, + agentType: params.subagent_type, + tools: { + todowrite: false, + todoread: false, + task: false, // Prevent recursive task calls + }, + depth: 1, // Direct subagent call from parent + }) + + const output = + result.output + + "\n\n" + + ["", `session_id: ${result.sessionID}`, ""].join("\n") + + return { + title: params.description, + metadata: { + sessionId: result.sessionID, + status: "completed", + }, + output, + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error) + return { + title: params.description, + metadata: { + agentType: params.subagent_type, + status: "error", + error: errorMessage, + }, + output: `Task failed: ${errorMessage}`, + } + } + }, +}) diff --git a/packages/kiana/src/tool/todo.ts b/packages/kiana/src/tool/todo.ts new file mode 100644 index 00000000000..649b36c17c3 --- /dev/null +++ b/packages/kiana/src/tool/todo.ts @@ -0,0 +1,109 @@ +import { z } from "zod" +import { defineTool } from "./tool.js" + +const DESCRIPTION_WRITE = `Use this tool to create and manage a structured task list for your current coding session. 
This helps you track progress, organize complex tasks, and demonstrate thoroughness to the user. +It also helps the user understand the progress of the task and overall progress of their requests. + +## When to Use This Tool +Use this tool proactively in these scenarios: + +1. Complex multi-step tasks - When a task requires 3 or more distinct steps or actions +2. Non-trivial and complex tasks - Tasks that require careful planning or multiple operations +3. User explicitly requests todo list - When the user directly asks you to use the todo list +4. User provides multiple tasks - When users provide a list of things to be done (numbered or comma-separated) +5. After receiving new instructions - Immediately capture user requirements as todos. Feel free to edit the todo list based on new information. +6. After completing a task - Mark it complete and add any new follow-up tasks +7. When you start working on a new task, mark the todo as in_progress. Ideally you should only have one todo as in_progress at a time. Complete existing tasks before starting new ones. + +## When NOT to Use This Tool + +Skip using this tool when: +1. There is only a single, straightforward task +2. The task is trivial and tracking it provides no organizational benefit +3. The task can be completed in less than 3 trivial steps +4. The task is purely conversational or informational + +NOTE that you should not use this tool if there is only one trivial task to do. In this case you are better off just doing the task directly. + +## Task States and Management + +1. **Task States**: Use these states to track progress: + - pending: Task not yet started + - in_progress: Currently working on (limit to ONE task at a time) + - completed: Task finished successfully + +2. 
**Task Management**: + - Update task status in real-time as you work + - Mark tasks complete IMMEDIATELY after finishing (don't batch completions) + - Only have ONE task in_progress at any time + - Complete current tasks before starting new ones + +3. **Task Breakdown**: + - Create specific, actionable items + - Break complex tasks into smaller, manageable steps + - Use clear, descriptive task names + +When in doubt, use this tool. Being proactive with task management demonstrates attentiveness and ensures you complete all requirements successfully.` + +const DESCRIPTION_READ = `Use this tool to read your current todo list` + +// Schema for todo items +const TodoInfoSchema = z.object({ + content: z.string().describe("The task description"), + status: z + .enum(["pending", "in_progress", "completed"]) + .describe("The task status"), + activeForm: z + .string() + .describe("The present continuous form of the task (e.g., 'Implementing feature')"), +}) + +export type TodoInfo = z.infer + +// In-memory todo storage per session +const todoStorage = new Map() + +export function getTodos(sessionID: string): TodoInfo[] { + return todoStorage.get(sessionID) || [] +} + +export function setTodos(sessionID: string, todos: TodoInfo[]): void { + todoStorage.set(sessionID, todos) +} + +export const todoWriteTool = defineTool("todowrite", { + description: DESCRIPTION_WRITE, + parameters: z.object({ + todos: z.array(TodoInfoSchema).describe("The updated todo list"), + }), + async execute(params, ctx) { + setTodos(ctx.sessionID, params.todos) + + const pendingCount = params.todos.filter((x) => x.status !== "completed").length + + return { + title: `${pendingCount} todos`, + output: JSON.stringify(params.todos, null, 2), + metadata: { + todos: params.todos, + }, + } + }, +}) + +export const todoReadTool = defineTool("todoread", { + description: DESCRIPTION_READ, + parameters: z.object({}), + async execute(_params, ctx) { + const todos = getTodos(ctx.sessionID) + const pendingCount = 
todos.filter((x) => x.status !== "completed").length + + return { + title: `${pendingCount} todos`, + metadata: { + todos, + }, + output: JSON.stringify(todos, null, 2), + } + }, +}) diff --git a/packages/kiana/src/tool/tool.ts b/packages/kiana/src/tool/tool.ts new file mode 100644 index 00000000000..287ae336356 --- /dev/null +++ b/packages/kiana/src/tool/tool.ts @@ -0,0 +1,65 @@ +import { z } from "zod" +import type { FilePart } from "../types/part.js" + +/** + * Tool execution context + */ +export interface ToolContext { + sessionID: string + messageID: string + abort: AbortSignal + callID?: string + workingDirectory: string + metadata(input: { title?: string; metadata?: Record<string, unknown> }): void +} + +/** + * Tool execution result + */ +export interface ToolResult { + title: string + output: string + metadata: Record<string, unknown> + attachments?: FilePart[] +} + +/** + * Tool definition + */ +export interface Tool
<P extends z.ZodTypeAny = z.ZodTypeAny>
 { + id: string + description: string + parameters: P + execute(args: z.infer
<P>
, ctx: ToolContext): Promise<ToolResult> + } + +/** + * Define a tool with validation + */ +export function defineTool
<P extends z.ZodTypeAny>
( + id: string, + config: { + description: string + parameters: P + execute(args: z.infer
<P>
, ctx: ToolContext): Promise<ToolResult> + } +): Tool
<P>
{ + return { + id, + description: config.description, + parameters: config.parameters, + execute: async (args, ctx) => { + // Validate parameters + const result = config.parameters.safeParse(args) + if (!result.success) { + const errors = result.error.issues + .map((e: z.ZodIssue) => `${e.path.join(".")}: ${e.message}`) + .join(", ") + throw new Error( + `The ${id} tool was called with invalid arguments: ${errors}. Please fix the input.` + ) + } + return config.execute(result.data, ctx) + }, + } +} diff --git a/packages/kiana/src/tool/webfetch.ts b/packages/kiana/src/tool/webfetch.ts new file mode 100644 index 00000000000..00b7a57a206 --- /dev/null +++ b/packages/kiana/src/tool/webfetch.ts @@ -0,0 +1,242 @@ +import { z } from "zod" +import { defineTool } from "./tool.js" + +const MAX_RESPONSE_SIZE = 5 * 1024 * 1024 // 5MB +const DEFAULT_TIMEOUT = 30 * 1000 // 30 seconds +const MAX_TIMEOUT = 120 * 1000 // 2 minutes + +const DESCRIPTION = `- Fetches content from a specified URL +- Takes a URL and a format as input +- Fetches the URL content, converts HTML to markdown +- Returns the content in the specified format +- Use this tool when you need to retrieve and analyze web content + +Usage notes: + - IMPORTANT: if another tool is present that offers better web fetching capabilities, is more targeted to the task, or has fewer restrictions, prefer using that tool instead of this one. 
+ - The URL must be a fully-formed valid URL + - HTTP URLs will be automatically upgraded to HTTPS + - This tool is read-only and does not modify any files + - Results may be summarized if the content is very large + - Includes a self-cleaning 15-minute cache for faster responses when repeatedly accessing the same URL` + +// Simple cache for responses +const cache = new Map() +const CACHE_TTL = 15 * 60 * 1000 // 15 minutes + +function cleanCache() { + const now = Date.now() + for (const [key, value] of cache) { + if (now - value.timestamp > CACHE_TTL) { + cache.delete(key) + } + } +} + +export const webfetchTool = defineTool("webfetch", { + description: DESCRIPTION, + parameters: z.object({ + url: z.string().describe("The URL to fetch content from"), + format: z + .enum(["text", "markdown", "html"]) + .describe("The format to return the content in (text, markdown, or html)"), + timeout: z.number().describe("Optional timeout in seconds (max 120)").optional(), + }), + async execute(params, ctx) { + // Validate URL + if (!params.url.startsWith("http://") && !params.url.startsWith("https://")) { + throw new Error("URL must start with http:// or https://") + } + + // Check cache + cleanCache() + const cacheKey = `${params.url}:${params.format}` + const cached = cache.get(cacheKey) + if (cached) { + const contentType = "text/html" // Cached, assume HTML + return { + output: cached.content, + title: `${params.url} (cached)`, + metadata: {}, + } + } + + const timeout = Math.min((params.timeout ?? 
DEFAULT_TIMEOUT / 1000) * 1000, MAX_TIMEOUT) + + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), timeout) + + // Build Accept header based on requested format + let acceptHeader = "*/*" + switch (params.format) { + case "markdown": + acceptHeader = "text/markdown;q=1.0, text/x-markdown;q=0.9, text/plain;q=0.8, text/html;q=0.7, */*;q=0.1" + break + case "text": + acceptHeader = "text/plain;q=1.0, text/markdown;q=0.9, text/html;q=0.8, */*;q=0.1" + break + case "html": + acceptHeader = "text/html;q=1.0, application/xhtml+xml;q=0.9, text/plain;q=0.8, text/markdown;q=0.7, */*;q=0.1" + break + default: + acceptHeader = + "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8" + } + + let response: Response + try { + response = await fetch(params.url, { + signal: controller.signal, + headers: { + "User-Agent": + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", + Accept: acceptHeader, + "Accept-Language": "en-US,en;q=0.9", + }, + }) + } finally { + clearTimeout(timeoutId) + } + + if (!response.ok) { + throw new Error(`Request failed with status code: ${response.status}`) + } + + // Check content length + const contentLength = response.headers.get("content-length") + if (contentLength && parseInt(contentLength) > MAX_RESPONSE_SIZE) { + throw new Error("Response too large (exceeds 5MB limit)") + } + + const arrayBuffer = await response.arrayBuffer() + if (arrayBuffer.byteLength > MAX_RESPONSE_SIZE) { + throw new Error("Response too large (exceeds 5MB limit)") + } + + const content = new TextDecoder().decode(arrayBuffer) + const contentType = response.headers.get("content-type") || "" + + const title = `${params.url} (${contentType})` + + let output: string + + // Handle content based on requested format and actual content type + switch (params.format) { + case "markdown": + if (contentType.includes("text/html")) { + 
output = convertHTMLToMarkdown(content) + } else { + output = content + } + break + + case "text": + if (contentType.includes("text/html")) { + output = extractTextFromHTML(content) + } else { + output = content + } + break + + case "html": + default: + output = content + break + } + + // Cache the result + cache.set(cacheKey, { content: output, timestamp: Date.now() }) + + return { + output, + title, + metadata: {}, + } + }, +}) + +function extractTextFromHTML(html: string): string { + // Simple HTML to text conversion + let text = html + // Remove script and style tags with their content + .replace(/]*>[\s\S]*?<\/script>/gi, "") + .replace(/]*>[\s\S]*?<\/style>/gi, "") + .replace(/]*>[\s\S]*?<\/noscript>/gi, "") + // Replace common block elements with newlines + .replace(/<\/?(p|div|br|h[1-6]|li|tr)[^>]*>/gi, "\n") + // Remove all remaining tags + .replace(/<[^>]+>/g, "") + // Decode HTML entities + .replace(/ /g, " ") + .replace(/&/g, "&") + .replace(/</g, "<") + .replace(/>/g, ">") + .replace(/"/g, '"') + .replace(/&#(\d+);/g, (_, num) => String.fromCharCode(parseInt(num))) + // Clean up whitespace + .replace(/\n\s*\n/g, "\n\n") + .trim() + + return text +} + +function convertHTMLToMarkdown(html: string): string { + let md = html + // Remove script and style tags + .replace(/]*>[\s\S]*?<\/script>/gi, "") + .replace(/]*>[\s\S]*?<\/style>/gi, "") + .replace(/]*>[\s\S]*?<\/noscript>/gi, "") + + // Convert headings + md = md.replace(/]*>([\s\S]*?)<\/h1>/gi, "\n# $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h2>/gi, "\n## $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h3>/gi, "\n### $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h4>/gi, "\n#### $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h5>/gi, "\n##### $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h6>/gi, "\n###### $1\n") + + // Convert links + md = md.replace(/]+href="([^"]*)"[^>]*>([\s\S]*?)<\/a>/gi, "[$2]($1)") + + // Convert images + md = md.replace(/]+src="([^"]*)"[^>]*alt="([^"]*)"[^>]*>/gi, "![$2]($1)") + md = 
md.replace(/]+src="([^"]*)"[^>]*>/gi, "![]($1)") + + // Convert bold and italic + md = md.replace(/<(strong|b)[^>]*>([\s\S]*?)<\/(strong|b)>/gi, "**$2**") + md = md.replace(/<(em|i)[^>]*>([\s\S]*?)<\/(em|i)>/gi, "*$2*") + + // Convert code + md = md.replace(/]*>([\s\S]*?)<\/code>/gi, "`$1`") + md = md.replace(/]*>([\s\S]*?)<\/pre>/gi, "\n```\n$1\n```\n") + + // Convert lists + md = md.replace(/]*>([\s\S]*?)<\/li>/gi, "\n- $1") + md = md.replace(/<\/?[ou]l[^>]*>/gi, "\n") + + // Convert paragraphs and divs + md = md.replace(/<\/p>/gi, "\n\n") + md = md.replace(/]*>/gi, "") + md = md.replace(//gi, "\n") + md = md.replace(/<\/div>/gi, "\n") + md = md.replace(/]*>/gi, "") + + // Convert horizontal rules + md = md.replace(/]*>/gi, "\n---\n") + + // Remove remaining tags + md = md.replace(/<[^>]+>/g, "") + + // Decode HTML entities + md = md + .replace(/ /g, " ") + .replace(/&/g, "&") + .replace(/</g, "<") + .replace(/>/g, ">") + .replace(/"/g, '"') + .replace(/&#(\d+);/g, (_, num) => String.fromCharCode(parseInt(num))) + + // Clean up whitespace + md = md.replace(/\n{3,}/g, "\n\n").trim() + + return md +} diff --git a/packages/kiana/src/tool/websearch.ts b/packages/kiana/src/tool/websearch.ts new file mode 100644 index 00000000000..d6fc010d99e --- /dev/null +++ b/packages/kiana/src/tool/websearch.ts @@ -0,0 +1,145 @@ +import { z } from "zod" +import { defineTool } from "./tool.js" + +const DESCRIPTION = `- Search the web using Exa AI - performs real-time web searches and can scrape content from specific URLs +- Provides up-to-date information for current events and recent data +- Supports configurable result counts and returns the content from the most relevant websites +- Use this tool for accessing information beyond knowledge cutoff +- Searches are performed automatically within a single API call + +Usage notes: + - Supports live crawling modes: 'fallback' (backup if cached unavailable) or 'preferred' (prioritize live crawling) + - Search types: 'auto' (balanced), 
'fast' (quick results), 'deep' (comprehensive search) + - Configurable context length for optimal LLM integration + - Domain filtering and advanced search options available` + +const API_CONFIG = { + BASE_URL: "https://mcp.exa.ai", + ENDPOINTS: { + SEARCH: "/mcp", + }, + DEFAULT_NUM_RESULTS: 8, +} as const + +interface McpSearchRequest { + jsonrpc: string + id: number + method: string + params: { + name: string + arguments: { + query: string + numResults?: number + livecrawl?: "fallback" | "preferred" + type?: "auto" | "fast" | "deep" + contextMaxCharacters?: number + } + } +} + +interface McpSearchResponse { + jsonrpc: string + result: { + content: Array<{ + type: string + text: string + }> + } +} + +export const websearchTool = defineTool("websearch", { + description: DESCRIPTION, + parameters: z.object({ + query: z.string().describe("Websearch query"), + numResults: z.number().optional().describe("Number of search results to return (default: 8)"), + livecrawl: z + .enum(["fallback", "preferred"]) + .optional() + .describe( + "Live crawl mode - 'fallback': use live crawling as backup if cached content unavailable, 'preferred': prioritize live crawling (default: 'fallback')" + ), + type: z + .enum(["auto", "fast", "deep"]) + .optional() + .describe( + "Search type - 'auto': balanced search (default), 'fast': quick results, 'deep': comprehensive search" + ), + contextMaxCharacters: z + .number() + .optional() + .describe("Maximum characters for context string optimized for LLMs (default: 10000)"), + }), + async execute(params, ctx) { + const searchRequest: McpSearchRequest = { + jsonrpc: "2.0", + id: 1, + method: "tools/call", + params: { + name: "web_search_exa", + arguments: { + query: params.query, + type: params.type || "auto", + numResults: params.numResults || API_CONFIG.DEFAULT_NUM_RESULTS, + livecrawl: params.livecrawl || "fallback", + contextMaxCharacters: params.contextMaxCharacters, + }, + }, + } + + const controller = new AbortController() + const 
timeoutId = setTimeout(() => controller.abort(), 25000) + + try { + const headers: Record = { + accept: "application/json, text/event-stream", + "content-type": "application/json", + } + + const signals = ctx.abort ? [controller.signal, ctx.abort] : [controller.signal] + const response = await fetch(`${API_CONFIG.BASE_URL}${API_CONFIG.ENDPOINTS.SEARCH}`, { + method: "POST", + headers, + body: JSON.stringify(searchRequest), + signal: AbortSignal.any(signals), + }) + + clearTimeout(timeoutId) + + if (!response.ok) { + const errorText = await response.text() + throw new Error(`Search error (${response.status}): ${errorText}`) + } + + const responseText = await response.text() + + // Parse SSE response + const lines = responseText.split("\n") + for (const line of lines) { + if (line.startsWith("data: ")) { + const data: McpSearchResponse = JSON.parse(line.substring(6)) + if (data.result && data.result.content && data.result.content.length > 0) { + return { + output: data.result.content[0].text, + title: `Web search: ${params.query}`, + metadata: {}, + } + } + } + } + + return { + output: "No search results found. Please try a different query.", + title: `Web search: ${params.query}`, + metadata: {}, + } + } catch (error) { + clearTimeout(timeoutId) + + if (error instanceof Error && error.name === "AbortError") { + throw new Error("Search request timed out") + } + + throw error + } + }, +}) diff --git a/packages/kiana/src/tool/write.ts b/packages/kiana/src/tool/write.ts new file mode 100644 index 00000000000..253e4c39c59 --- /dev/null +++ b/packages/kiana/src/tool/write.ts @@ -0,0 +1,53 @@ +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" + +const DESCRIPTION = `Writes a file to the local filesystem. + +Usage: +- This tool will overwrite the existing file if there is one at the provided path. +- If this is an existing file, you MUST use the Read tool first to read the file's contents. 
This tool will fail if you did not read the file first. +- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required. +- NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User. +- Only use emojis if the user explicitly requests it. Avoid writing emojis to files unless asked.` + +export const writeTool = defineTool("write", { + description: DESCRIPTION, + parameters: z.object({ + content: z.string().describe("The content to write to the file"), + filePath: z + .string() + .describe( + "The absolute path to the file to write (must be absolute, not relative)" + ), + }), + async execute(params, ctx) { + const filepath = path.isAbsolute(params.filePath) + ? params.filePath + : path.join(ctx.workingDirectory, params.filePath) + + const exists = fs.existsSync(filepath) + + // Ensure parent directory exists + const dir = path.dirname(filepath) + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }) + } + + // Write file + fs.writeFileSync(filepath, params.content, "utf-8") + + const title = path.relative(ctx.workingDirectory, filepath) + + return { + title, + metadata: { + diagnostics: {}, + filepath, + exists, + }, + output: "", + } + }, +}) diff --git a/packages/kiana/src/types/event.ts b/packages/kiana/src/types/event.ts new file mode 100644 index 00000000000..cc1cd11cc1e --- /dev/null +++ b/packages/kiana/src/types/event.ts @@ -0,0 +1,184 @@ +import { z } from "zod" +import { SessionInfoSchema, FileDiffSchema } from "./session.js" +import { MessageInfoSchema } from "./message.js" +import { PartSchema } from "./part.js" + +// Subagent context - added to events from subagent sessions +export const SubagentContextSchema = z.object({ + parentSessionID: z.string(), + depth: z.number(), // 1 = direct subagent, 2 = subagent of subagent, etc. + agentType: z.string().optional(), // e.g., "Explore", "Plan", etc. 
+}) + +export type SubagentContext = z.infer + +// Session events +export const SessionCreatedEventSchema = z.object({ + type: z.literal("session.created"), + properties: z.object({ + session: SessionInfoSchema, + }), +}) + +export const SessionUpdatedEventSchema = z.object({ + type: z.literal("session.updated"), + properties: z.object({ + session: SessionInfoSchema, + }), +}) + +export const SessionDeletedEventSchema = z.object({ + type: z.literal("session.deleted"), + properties: z.object({ + session: SessionInfoSchema, + }), +}) + +export const SessionIdleEventSchema = z.object({ + type: z.literal("session.idle"), + properties: z.object({ + sessionID: z.string(), + }), +}) + +export const SessionCompactedEventSchema = z.object({ + type: z.literal("session.compacted"), + properties: z.object({ + sessionID: z.string(), + }), +}) + +export const SessionDiffEventSchema = z.object({ + type: z.literal("session.diff"), + properties: z.object({ + sessionID: z.string(), + diff: z.array(FileDiffSchema), + }), +}) + +export const SessionStatusEventSchema = z.object({ + type: z.literal("session.status"), + properties: z.object({ + sessionID: z.string(), + status: z.object({ + state: z.enum(["idle", "running", "error"]), + message: z.string().optional(), + }), + }), +}) + +// Message events +export const MessageCreatedEventSchema = z.object({ + type: z.literal("message.created"), + properties: z.object({ + message: MessageInfoSchema, + }), +}) + +export const MessageUpdatedEventSchema = z.object({ + type: z.literal("message.updated"), + properties: z.object({ + message: MessageInfoSchema, + }), +}) + +export const MessageRemovedEventSchema = z.object({ + type: z.literal("message.removed"), + properties: z.object({ + sessionID: z.string(), + messageID: z.string(), + }), +}) + +export const MessagePartUpdatedEventSchema = z.object({ + type: z.literal("message.part.updated"), + properties: z.object({ + part: PartSchema, + delta: z.string().optional(), + }), +}) + +export const 
MessagePartRemovedEventSchema = z.object({ + type: z.literal("message.part.removed"), + properties: z.object({ + sessionID: z.string(), + messageID: z.string(), + partID: z.string(), + }), +}) + +// Todo events +export const TodoUpdatedEventSchema = z.object({ + type: z.literal("todo.updated"), + properties: z.object({ + sessionID: z.string(), + todos: z.array( + z.object({ + content: z.string(), + status: z.enum(["pending", "in_progress", "completed"]), + activeForm: z.string(), + }) + ), + }), +}) + +// Base event schema (without context) +const BaseEventSchema = z.discriminatedUnion("type", [ + SessionCreatedEventSchema, + SessionUpdatedEventSchema, + SessionDeletedEventSchema, + SessionIdleEventSchema, + SessionCompactedEventSchema, + SessionDiffEventSchema, + SessionStatusEventSchema, + MessageCreatedEventSchema, + MessageUpdatedEventSchema, + MessageRemovedEventSchema, + MessagePartUpdatedEventSchema, + MessagePartRemovedEventSchema, + TodoUpdatedEventSchema, +]) + +// Union of all events with optional subagent context +export const EventSchema = BaseEventSchema.and( + z.object({ + context: SubagentContextSchema.optional(), + }) +) + +export type Event = z.infer + +// Event type enum for convenience +export type EventType = Event["type"] + +// Helper to create typed events +export function createEvent( + type: T, + properties: Extract["properties"] +): Extract { + return { type, properties } as Extract +} + +// Category types for export +export type SessionEvent = z.infer< + | typeof SessionCreatedEventSchema + | typeof SessionUpdatedEventSchema + | typeof SessionDeletedEventSchema + | typeof SessionIdleEventSchema + | typeof SessionCompactedEventSchema + | typeof SessionDiffEventSchema + | typeof SessionStatusEventSchema +> + +export type MessageEvent = z.infer< + | typeof MessageCreatedEventSchema + | typeof MessageUpdatedEventSchema + | typeof MessageRemovedEventSchema +> + +export type PartEvent = z.infer< + | typeof MessagePartUpdatedEventSchema + | 
typeof MessagePartRemovedEventSchema +> + +export type TodoEvent = z.infer diff --git a/packages/kiana/src/types/index.ts b/packages/kiana/src/types/index.ts new file mode 100644 index 00000000000..0b2262ea1b7 --- /dev/null +++ b/packages/kiana/src/types/index.ts @@ -0,0 +1,5 @@ +// Re-export all types +export * from "./session.js" +export * from "./message.js" +export * from "./part.js" +export * from "./event.js" diff --git a/packages/kiana/src/types/message.ts b/packages/kiana/src/types/message.ts new file mode 100644 index 00000000000..be89ddd5ed6 --- /dev/null +++ b/packages/kiana/src/types/message.ts @@ -0,0 +1,96 @@ +import { z } from "zod" +import { FileDiffSchema } from "./session.js" + +export const ModelRefSchema = z.object({ + providerID: z.string(), + modelID: z.string(), +}) + +export type ModelRef = z.infer + +export const MessageTimeSchema = z.object({ + created: z.number(), + completed: z.number().optional(), +}) + +export type MessageTime = z.infer + +export const MessagePathSchema = z.object({ + cwd: z.string(), + root: z.string(), +}) + +export type MessagePath = z.infer + +export const CacheUsageSchema = z.object({ + read: z.number(), + write: z.number(), +}) + +export type CacheUsage = z.infer + +export const TokenUsageSchema = z.object({ + input: z.number(), + output: z.number(), + reasoning: z.number(), + cache: CacheUsageSchema, +}) + +export type TokenUsage = z.infer + +export const MessageErrorDataSchema = z.object({ + message: z.string(), + providerID: z.string().optional(), + status: z.number().optional(), + retryable: z.boolean().optional(), +}) + +export type MessageErrorData = z.infer + +export const MessageErrorSchema = z.object({ + name: z.enum([ + "UnknownError", + "ProviderAuthError", + "MessageOutputLengthError", + "MessageAbortedError", + "APIError", + ]), + data: MessageErrorDataSchema, +}) + +export type MessageError = z.infer + +export const UserMessageSummarySchema = z.object({ + title: z.string().optional(), + body: 
z.string().optional(), + diffs: z.array(FileDiffSchema).optional(), +}) + +export type UserMessageSummary = z.infer + +export const MessageInfoSchema = z.object({ + id: z.string(), + sessionID: z.string(), + role: z.enum(["user", "assistant"]), + time: MessageTimeSchema, + + // User fields + agent: z.string().optional(), + model: ModelRefSchema.optional(), + system: z.string().optional(), + tools: z.record(z.string(), z.boolean()).optional(), + summary: UserMessageSummarySchema.optional(), + + // Assistant fields + parentID: z.string().optional(), + modelID: z.string().optional(), + providerID: z.string().optional(), + mode: z.string().optional(), + path: MessagePathSchema.optional(), + cost: z.number().optional(), + tokens: TokenUsageSchema.optional(), + finish: z.string().optional(), + error: MessageErrorSchema.optional(), +}) + +export type MessageInfo = z.infer diff --git a/packages/kiana/src/types/part.ts b/packages/kiana/src/types/part.ts new file mode 100644 index 00000000000..3af3f13ea89 --- /dev/null +++ b/packages/kiana/src/types/part.ts @@ -0,0 +1,229 @@ +import { z } from "zod" +import { TokenUsageSchema } from "./message.js" + +// Common time structures +export const PartTimeSchema = z.object({ + start: z.number().optional(), + end: z.number().optional(), +}) + +export type PartTime = z.infer + +export const ToolTimeSchema = z.object({ + start: z.number(), + end: z.number().optional(), + compacted: z.number().optional(), +}) + +export type ToolTime = z.infer + +// Base part fields +const BasePartSchema = z.object({ + id: z.string(), + sessionID: z.string(), + messageID: z.string(), +}) + +// Text part +export const TextPartSchema = BasePartSchema.extend({ + type: z.literal("text"), + text: z.string(), + synthetic: z.boolean().optional(), + ignored: z.boolean().optional(), + time: PartTimeSchema.optional(), + metadata: z.record(z.string(), z.unknown()).optional(), +}) + +export type TextPart = z.infer + +// Reasoning part +export const 
ReasoningPartSchema = BasePartSchema.extend({ + type: z.literal("reasoning"), + text: z.string(), + time: PartTimeSchema.optional(), + metadata: z.record(z.string(), z.unknown()).optional(), +}) + +export type ReasoningPart = z.infer + +// Tool state variants +export const ToolStatePendingSchema = z.object({ + status: z.literal("pending"), + input: z.record(z.string(), z.unknown()), + raw: z.string(), +}) + +export const ToolStateRunningSchema = z.object({ + status: z.literal("running"), + input: z.record(z.string(), z.unknown()), + title: z.string().optional(), + metadata: z.record(z.string(), z.unknown()).optional(), + time: z.object({ start: z.number() }), +}) + +export const ToolStateCompletedSchema = z.object({ + status: z.literal("completed"), + input: z.record(z.string(), z.unknown()), + output: z.string(), + title: z.string(), + metadata: z.record(z.string(), z.unknown()), + time: z.object({ + start: z.number(), + end: z.number(), + compacted: z.number().optional(), + }), + attachments: z.array(z.lazy(() => FilePartSchema)).optional(), +}) + +export const ToolStateErrorSchema = z.object({ + status: z.literal("error"), + input: z.record(z.string(), z.unknown()), + error: z.string(), + metadata: z.record(z.string(), z.unknown()).optional(), + time: z.object({ + start: z.number(), + end: z.number(), + }), +}) + +export const ToolStateSchema = z.discriminatedUnion("status", [ + ToolStatePendingSchema, + ToolStateRunningSchema, + ToolStateCompletedSchema, + ToolStateErrorSchema, +]) + +export type ToolState = z.infer + +// Tool part +export const ToolPartSchema = BasePartSchema.extend({ + type: z.literal("tool"), + callID: z.string(), + tool: z.string(), + state: ToolStateSchema, + metadata: z.record(z.string(), z.unknown()).optional(), +}) + +export type ToolPart = z.infer + +// File part +export const FilePartSchema = BasePartSchema.extend({ + type: z.literal("file"), + mime: z.string(), + filename: z.string().optional(), + url: z.string(), + source: z + 
.object({ + type: z.string(), + url: z.string().optional(), + }) + .optional(), +}) + +export type FilePart = z.infer + +// Step start part +export const StepStartPartSchema = BasePartSchema.extend({ + type: z.literal("step-start"), + snapshot: z.string().optional(), +}) + +export type StepStartPart = z.infer + +// Step finish part +export const StepFinishPartSchema = BasePartSchema.extend({ + type: z.literal("step-finish"), + reason: z.string(), + snapshot: z.string().optional(), + cost: z.number(), + tokens: TokenUsageSchema.optional(), +}) + +export type StepFinishPart = z.infer + +// Snapshot part +export const SnapshotPartSchema = BasePartSchema.extend({ + type: z.literal("snapshot"), + snapshot: z.string(), +}) + +export type SnapshotPart = z.infer + +// Patch part +export const PatchPartSchema = BasePartSchema.extend({ + type: z.literal("patch"), + hash: z.string(), + files: z.array(z.string()), +}) + +export type PatchPart = z.infer + +// Agent part +export const AgentPartSchema = BasePartSchema.extend({ + type: z.literal("agent"), + name: z.string(), + source: z + .object({ + value: z.string(), + start: z.number(), + end: z.number(), + }) + .optional(), +}) + +export type AgentPart = z.infer + +// Retry part +export const RetryPartSchema = BasePartSchema.extend({ + type: z.literal("retry"), + attempt: z.number(), + error: z.object({ + name: z.literal("APIError"), + data: z.object({ + status: z.number().optional(), + message: z.string(), + retryable: z.boolean().optional(), + }), + }), + time: z.object({ + created: z.number(), + }), +}) + +export type RetryPart = z.infer + +// Compaction part +export const CompactionPartSchema = BasePartSchema.extend({ + type: z.literal("compaction"), + auto: z.boolean(), +}) + +export type CompactionPart = z.infer + +// Subtask part (Go-only, but included for compatibility) +export const SubtaskPartSchema = BasePartSchema.extend({ + type: z.literal("subtask"), + prompt: z.string(), + description: z.string(), + agent: 
z.string(), +}) + +export type SubtaskPart = z.infer + +// Union of all parts +export const PartSchema = z.discriminatedUnion("type", [ + TextPartSchema, + ReasoningPartSchema, + ToolPartSchema, + FilePartSchema, + StepStartPartSchema, + StepFinishPartSchema, + SnapshotPartSchema, + PatchPartSchema, + AgentPartSchema, + RetryPartSchema, + CompactionPartSchema, + SubtaskPartSchema, +]) + +export type Part = z.infer diff --git a/packages/kiana/src/types/session.ts b/packages/kiana/src/types/session.ts new file mode 100644 index 00000000000..3d731f507fd --- /dev/null +++ b/packages/kiana/src/types/session.ts @@ -0,0 +1,57 @@ +import { z } from "zod" + +export const FileDiffSchema = z.object({ + file: z.string(), + additions: z.number(), + deletions: z.number(), + diff: z.string().optional(), +}) + +export type FileDiff = z.infer + +export const SessionSummarySchema = z.object({ + additions: z.number(), + deletions: z.number(), + files: z.number(), + diffs: z.array(FileDiffSchema).optional(), +}) + +export type SessionSummary = z.infer + +export const SessionShareSchema = z.object({ + url: z.string(), +}) + +export type SessionShare = z.infer + +export const SessionTimeSchema = z.object({ + created: z.number(), + updated: z.number(), + compacting: z.number().optional(), +}) + +export type SessionTime = z.infer + +export const SessionRevertSchema = z.object({ + messageID: z.string(), + partID: z.string().optional(), + snapshot: z.string().optional(), + diff: z.string().optional(), +}) + +export type SessionRevert = z.infer + +export const SessionInfoSchema = z.object({ + id: z.string(), + projectID: z.string(), + directory: z.string(), + parentID: z.string().optional(), + title: z.string(), + version: z.string(), + time: SessionTimeSchema, + summary: SessionSummarySchema.optional(), + share: SessionShareSchema.optional(), + revert: SessionRevertSchema.optional(), +}) + +export type SessionInfo = z.infer diff --git a/packages/kiana/src/util/ripgrep.ts 
b/packages/kiana/src/util/ripgrep.ts new file mode 100644 index 00000000000..74d37967a26 --- /dev/null +++ b/packages/kiana/src/util/ripgrep.ts @@ -0,0 +1,220 @@ +import { spawn } from "node:child_process" +import * as fs from "node:fs" +import * as path from "node:path" + +let rgPath: string | null = null +let checkedRg = false + +/** + * Find ripgrep binary, returns null if not available + */ +async function findRg(): Promise { + if (checkedRg) return rgPath + + checkedRg = true + + // Check common locations + const candidates = ["rg", "/usr/bin/rg", "/usr/local/bin/rg"] + + for (const candidate of candidates) { + try { + const result = await execCommand(candidate, ["--version"]) + if (result.exitCode === 0) { + rgPath = candidate + return rgPath + } + } catch { + // Not found, try next + } + } + + return null +} + +/** + * Execute a command and return output + */ +function execCommand( + cmd: string, + args: string[], + options?: { cwd?: string; maxBuffer?: number } +): Promise<{ stdout: string; stderr: string; exitCode: number }> { + return new Promise((resolve, reject) => { + const proc = spawn(cmd, args, { + cwd: options?.cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + + let stdout = "" + let stderr = "" + const maxBuffer = options?.maxBuffer ?? 1024 * 1024 * 10 // 10MB + + proc.stdout.on("data", (data) => { + if (stdout.length < maxBuffer) { + stdout += data.toString() + } + }) + + proc.stderr.on("data", (data) => { + if (stderr.length < maxBuffer) { + stderr += data.toString() + } + }) + + proc.on("error", reject) + proc.on("close", (code) => { + resolve({ stdout, stderr, exitCode: code ?? 
0 }) + }) + }) +} + +/** + * List files matching glob patterns using ripgrep or find + */ +export async function* listFiles(options: { + cwd: string + glob?: string[] +}): AsyncGenerator { + const rg = await findRg() + + if (rg) { + // Use ripgrep + const args = ["--files", "--follow", "--hidden", "--glob=!.git/*"] + if (options.glob) { + for (const g of options.glob) { + args.push(`--glob=${g}`) + } + } + + const result = await execCommand(rg, args, { cwd: options.cwd }) + if (result.exitCode === 0 || result.exitCode === 1) { + // exitCode 1 means no matches, which is ok + const lines = result.stdout.trim().split("\n").filter(Boolean) + for (const line of lines) { + yield line + } + } + } else { + // Fallback to find command + const args = [".", "-type", "f", "-not", "-path", "*/.git/*"] + if (options.glob && options.glob.length > 0) { + // Convert glob patterns to find -name patterns + for (const g of options.glob) { + if (g.startsWith("!")) { + // Exclusion pattern + const pattern = g.slice(1) + args.push("-not", "-path", `*/${pattern}`) + } else { + args.push("-name", g.replace("**/*", "*")) + } + } + } + + const result = await execCommand("find", args, { cwd: options.cwd }) + if (result.exitCode === 0) { + const lines = result.stdout + .trim() + .split("\n") + .filter(Boolean) + .map((line) => line.replace(/^\.\//, "")) + for (const line of lines) { + yield line + } + } + } +} + +/** + * Search file contents using ripgrep or grep + */ +export async function searchContent(options: { + cwd: string + pattern: string + glob?: string +}): Promise< + Array<{ + path: string + lineNum: number + lineText: string + }> +> { + const rg = await findRg() + + if (rg) { + // Use ripgrep + const args = ["-nH", "--field-match-separator=|", "--regexp", options.pattern] + if (options.glob) { + args.push("--glob", options.glob) + } + args.push(options.cwd) + + const result = await execCommand(rg, args) + + if (result.exitCode === 1) { + // No matches + return [] + } + + if 
(result.exitCode !== 0) { + throw new Error(`ripgrep failed: ${result.stderr}`) + } + + return parseSearchOutput(result.stdout, "|") + } else { + // Fallback to grep + const args = ["-rnH", "--include=" + (options.glob || "*"), options.pattern, options.cwd] + + const result = await execCommand("grep", args) + + if (result.exitCode === 1) { + // No matches + return [] + } + + if (result.exitCode !== 0) { + throw new Error(`grep failed: ${result.stderr}`) + } + + return parseSearchOutput(result.stdout, ":") + } +} + +/** + * Parse search output (works for both rg and grep) + */ +function parseSearchOutput( + output: string, + separator: string +): Array<{ path: string; lineNum: number; lineText: string }> { + const lines = output.trim().split("\n") + const matches: Array<{ path: string; lineNum: number; lineText: string }> = [] + + for (const line of lines) { + if (!line) continue + + const parts = line.split(separator) + if (parts.length < 3) continue + + const filePath = parts[0] + const lineNumStr = parts[1] + const lineText = parts.slice(2).join(separator) + + const lineNum = parseInt(lineNumStr, 10) + if (isNaN(lineNum)) continue + + matches.push({ + path: filePath, + lineNum, + lineText, + }) + } + + return matches +} + +/** + * Check if ripgrep is available + */ +export async function hasRipgrep(): Promise { + return (await findRg()) !== null +} diff --git a/packages/kiana/test-config.jsonc b/packages/kiana/test-config.jsonc new file mode 100644 index 00000000000..44ba358860d --- /dev/null +++ b/packages/kiana/test-config.jsonc @@ -0,0 +1,9 @@ +{ + "provider": { + "type": "openai-compatible", + "apiKey": "23ed9d5c-c634-4cc1-9e70-9c9ac63a17ef", + "model": "ep-20250821060450-4bc6g", + "baseUrl": "https://ark-ap-southeast.byteintl.net/api/v3" + }, + "workingDirectory": "/Users/joohwi.lee/telnet2/opencode/packages/kiana" +} diff --git a/packages/kiana/tsconfig.json b/packages/kiana/tsconfig.json new file mode 100644 index 00000000000..c5aa753d6d0 --- /dev/null +++ 
b/packages/kiana/tsconfig.json @@ -0,0 +1,21 @@ +{ + "compilerOptions": { + "target": "ES2023", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "lib": ["ES2023"], + "outDir": "./dist", + "rootDir": "./src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "resolveJsonModule": true, + "isolatedModules": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} From 6677908115b5c5a68c5d7ab659641e1e826d7af4 Mon Sep 17 00:00:00 2001 From: Joohwi Lee Date: Thu, 11 Dec 2025 00:48:44 -0800 Subject: [PATCH 2/3] feat: enhance kiana with comprehensive session persistence, streaming controls, and provider improvements MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary This major update significantly enhances the Kiana headless coding agent with robust session persistence, configurable streaming modes, improved provider compatibility, and comprehensive token tracking capabilities. 
## Key Features ### 🔍 Session Persistence & State Management - **Full Session Persistence**: Complete session state including messages, parts, and metadata saved to disk - **Session Storage**: New `SessionStorage` interface with JSON-based persistence in `session.json` and `messages.json` - **Session Reloading**: Automatic loading of existing sessions when session directory is provided - **State Recovery**: Seamless continuation of previous sessions with full message history ### ⚙️ Streaming & Configuration Enhancements - **Configurable Streaming**: New `streaming` config option to toggle between streaming and non-streaming modes - **Non-Streaming Mode**: Better token counting support for OpenAI-compatible providers - **Retry Logic**: Configurable `maxRetries` (default: 5) with exponential backoff for rate limit handling - **Provider Compatibility**: Enhanced support for Ark, Moonshot, and other OpenAI-compatible APIs ### 📊 Token Tracking & Usage Analytics - **Comprehensive Token Counting**: Support for both AI SDK v6 nested format and legacy flat format - **Usage Data**: Detailed token breakdown including input, output, reasoning, and cache metrics - **Real-time Updates**: Token counts updated throughout streaming sessions - **Final Usage**: Accurate total usage accumulation across all steps ### 🛠️ CLI & Provider Improvements - **CLI Session Handling**: Simplified session creation with optional persistence directory - **Provider Configuration**: Added `includeUsage: true` for OpenAI-compatible streaming usage - **Tool Call Deduplication**: Prevent duplicate "running" headers for the same tool call - **Error Handling**: Improved error handling and state management throughout the pipeline ### 🔄 Code Architecture - **Session Options**: New `SessionOptions` interface for optional session configuration - **Storage Abstraction**: Clean separation between session logic and persistence layer - **Backward Compatibility**: All new features are optional with sensible 
defaults - **Type Safety**: Enhanced TypeScript types for all new functionality ## Breaking Changes None - all new features are backward compatible with existing configurations. ## Configuration Updates ```jsonc { // New configuration options "streaming": true, // Enable/disable streaming mode "maxRetries": 5, // Rate limit retry attempts // ... existing config } ``` ## Usage Examples ```typescript // Create session with persistence const session = await createSession(config, { sessionDir: './my-session' }); // Disable streaming for better token counting const config = { ...baseConfig, streaming: false, maxRetries: 3 }; ``` ## Technical Details - **File Structure**: Sessions saved as `session.json` (metadata) and `messages.json` (full history) - **Error Recovery**: Graceful handling of corrupted session files - **Performance**: Minimal overhead for persistence operations - **Cross-Platform**: Works on all Node.js supported platforms --- packages/kiana/commit-message.txt | 70 +++ packages/kiana/kiana.jsonc | 3 +- packages/kiana/src/cli.ts | 44 +- packages/kiana/src/config.ts | 20 +- packages/kiana/src/index.ts | 2 +- packages/kiana/src/provider.ts | 4 + packages/kiana/src/session.ts | 691 ++++++++++++++++++----------- packages/kiana/test-config.jsonc | 3 +- packages/kiana/tmpx/messages.json | 591 ++++++++++++++++++++++++ packages/kiana/tmpx/session.json | 11 + packages/kiana/tmpx2/messages.json | 156 +++++++ packages/kiana/tmpx2/session.json | 11 + packages/kiana/tmpx3/messages.json | 82 ++++ packages/kiana/tmpx3/session.json | 11 + packages/kiana/tmpx4/messages.json | 82 ++++ packages/kiana/tmpx4/session.json | 11 + 16 files changed, 1498 insertions(+), 294 deletions(-) create mode 100644 packages/kiana/commit-message.txt create mode 100644 packages/kiana/tmpx/messages.json create mode 100644 packages/kiana/tmpx/session.json create mode 100644 packages/kiana/tmpx2/messages.json create mode 100644 packages/kiana/tmpx2/session.json create mode 100644 
packages/kiana/tmpx3/messages.json create mode 100644 packages/kiana/tmpx3/session.json create mode 100644 packages/kiana/tmpx4/messages.json create mode 100644 packages/kiana/tmpx4/session.json diff --git a/packages/kiana/commit-message.txt b/packages/kiana/commit-message.txt new file mode 100644 index 00000000000..9179d38d2a8 --- /dev/null +++ b/packages/kiana/commit-message.txt @@ -0,0 +1,70 @@ +feat: enhance kiana with comprehensive session persistence, streaming controls, and provider improvements + +## Summary +This major update significantly enhances the Kiana headless coding agent with robust session persistence, configurable streaming modes, improved provider compatibility, and comprehensive token tracking capabilities. + +## Key Features + +### 🔍 Session Persistence & State Management +- **Full Session Persistence**: Complete session state including messages, parts, and metadata saved to disk +- **Session Storage**: New `SessionStorage` interface with JSON-based persistence in `session.json` and `messages.json` +- **Session Reloading**: Automatic loading of existing sessions when session directory is provided +- **State Recovery**: Seamless continuation of previous sessions with full message history + +### ⚙️ Streaming & Configuration Enhancements +- **Configurable Streaming**: New `streaming` config option to toggle between streaming and non-streaming modes +- **Non-Streaming Mode**: Better token counting support for OpenAI-compatible providers +- **Retry Logic**: Configurable `maxRetries` (default: 5) with exponential backoff for rate limit handling +- **Provider Compatibility**: Enhanced support for Ark, Moonshot, and other OpenAI-compatible APIs + +### 📊 Token Tracking & Usage Analytics +- **Comprehensive Token Counting**: Support for both AI SDK v6 nested format and legacy flat format +- **Usage Data**: Detailed token breakdown including input, output, reasoning, and cache metrics +- **Real-time Updates**: Token counts updated throughout streaming 
sessions +- **Final Usage**: Accurate total usage accumulation across all steps + +### 🛠️ CLI & Provider Improvements +- **CLI Session Handling**: Simplified session creation with optional persistence directory +- **Provider Configuration**: Added `includeUsage: true` for OpenAI-compatible streaming usage +- **Tool Call Deduplication**: Prevent duplicate "running" headers for the same tool call +- **Error Handling**: Improved error handling and state management throughout the pipeline + +### 🔄 Code Architecture +- **Session Options**: New `SessionOptions` interface for optional session configuration +- **Storage Abstraction**: Clean separation between session logic and persistence layer +- **Backward Compatibility**: All new features are optional with sensible defaults +- **Type Safety**: Enhanced TypeScript types for all new functionality + +## Breaking Changes +None - all new features are backward compatible with existing configurations. + +## Configuration Updates +```jsonc +{ + // New configuration options + "streaming": true, // Enable/disable streaming mode + "maxRetries": 5, // Rate limit retry attempts + // ... 
existing config +} +``` + +## Usage Examples +```typescript +// Create session with persistence +const session = await createSession(config, { + sessionDir: './my-session' +}); + +// Disable streaming for better token counting +const config = { + ...baseConfig, + streaming: false, + maxRetries: 3 +}; +``` + +## Technical Details +- **File Structure**: Sessions saved as `session.json` (metadata) and `messages.json` (full history) +- **Error Recovery**: Graceful handling of corrupted session files +- **Performance**: Minimal overhead for persistence operations +- **Cross-Platform**: Works on all Node.js supported platforms \ No newline at end of file diff --git a/packages/kiana/kiana.jsonc b/packages/kiana/kiana.jsonc index 72d963fb24a..101fe8b5b66 100644 --- a/packages/kiana/kiana.jsonc +++ b/packages/kiana/kiana.jsonc @@ -4,5 +4,6 @@ "apiKey": "23ed9d5c-c634-4cc1-9e70-9c9ac63a17ef", "model": "ep-20250821060450-4bc6g", "baseUrl": "https://ark-ap-southeast.byteintl.net/api/v3" - } + }, + "streaming": true } diff --git a/packages/kiana/src/cli.ts b/packages/kiana/src/cli.ts index 9dfa72df4f8..a3259fc3367 100644 --- a/packages/kiana/src/cli.ts +++ b/packages/kiana/src/cli.ts @@ -106,6 +106,9 @@ const colors = { gray: "\x1b[90m", } +// Track tool calls that have already shown their "running" header +const seenRunningTools = new Set() + // Human-readable event formatter function formatEventHumanReadable(event: any, verbose: boolean = false): string | null { const indent = event.context ? 
" ".repeat(event.context.depth) : "" @@ -135,7 +138,11 @@ function formatEventHumanReadable(event: any, verbose: boolean = false): string // Streaming text delta return `${indent}${agentPrefix}${delta}` } - return null // Final update without delta + // Non-streaming: print full text + if (part.text) { + return `${indent}${agentPrefix}${part.text}\n` + } + return null } if (part.type === "reasoning") { @@ -148,6 +155,13 @@ function formatEventHumanReadable(event: any, verbose: boolean = false): string if (part.type === "tool") { const state = part.state if (state.status === "running") { + // Only show the "running" header once per tool call + const toolKey = `${part.id}:${part.callID}` + if (seenRunningTools.has(toolKey)) { + return null // Already shown + } + seenRunningTools.add(toolKey) + const toolName = part.tool const title = state.title || formatToolInput(toolName, state.input) let output = `\n${indent}${agentPrefix}${colors.cyan}▶ ${toolName}${colors.reset} ${colors.gray}${title}${colors.reset}\n` @@ -297,8 +311,9 @@ async function main(): Promise { config.workingDirectory = path.resolve(args.cwd) } - // Create session - const session = await createSession(config) + // Create session with optional persistence + const sessionDir = args.session ? 
path.resolve(args.session) : undefined + const session = await createSession(config, { sessionDir }) // Subscribe to events and output if (args.humanReadable) { @@ -315,29 +330,6 @@ async function main(): Promise { }) } - // Handle session persistence - if (args.session) { - const sessionDir = path.resolve(args.session) - fs.mkdirSync(sessionDir, { recursive: true }) - - // Save session info - const sessionFile = path.join(sessionDir, "session.json") - fs.writeFileSync( - sessionFile, - JSON.stringify( - { - id: session.id, - created: Date.now(), - config: configPath, - }, - null, - 2 - ) - ) - - // TODO: Implement full session persistence with messages/parts - } - // If --prompt is provided, send it and exit after completion if (args.prompt) { await session.sendMessage(args.prompt) diff --git a/packages/kiana/src/config.ts b/packages/kiana/src/config.ts index 5e0873e287a..b683b2f3f28 100644 --- a/packages/kiana/src/config.ts +++ b/packages/kiana/src/config.ts @@ -16,6 +16,12 @@ export const ConfigSchema = z.object({ systemPrompt: z.string().optional(), workingDirectory: z.string().nullable().optional(), tools: z.array(z.string()).nullable().optional(), + // Whether to use streaming mode (default: true) + // Set to false for proper token counting with openai-compatible providers + streaming: z.boolean().optional().default(true), + // Maximum number of retries for rate limit errors (default: 5) + // Uses exponential backoff and respects Retry-After headers + maxRetries: z.number().int().min(0).max(10).optional().default(5), }) export type Config = z.infer @@ -66,6 +72,10 @@ Since you are running headless without user interaction: # Tool usage - Call multiple tools in parallel when independent. - Use specialized tools (Read, Write, Edit, Glob, Grep) instead of bash equivalents. +- CRITICAL: When calling tools, pass arguments as a proper JSON object, NOT as a string. 
+ - Correct: {"command": "git status", "description": "Check git status"} + - Wrong: "{\"command\": \"git status\", \"description\": \"Check git status\"}" +- Never stringify or escape the arguments object - pass it directly as structured data. # Code References When referencing code, include \`file_path:line_number\` for navigation.` @@ -90,7 +100,15 @@ export const CONFIG_TEMPLATE = `{ // Tool whitelist (optional, null = all tools enabled) // Available: bash, read, write, edit, glob, grep, list, webfetch, websearch, codesearch, todowrite, todoread, task - "tools": null + "tools": null, + + // Whether to use streaming mode (default: true) + // Set to false for proper token counting with openai-compatible providers + "streaming": true, + + // Maximum retries for rate limit errors (default: 5) + // Uses exponential backoff and respects Retry-After headers + "maxRetries": 5 } ` diff --git a/packages/kiana/src/index.ts b/packages/kiana/src/index.ts index c7f073b3135..8c9effdbed8 100644 --- a/packages/kiana/src/index.ts +++ b/packages/kiana/src/index.ts @@ -2,7 +2,7 @@ // A standalone TypeScript package providing a headless coding agent library + CLI wrapper // Session API -export { createSession, type Session, type SessionState, type MessageWithParts } from "./session.js" +export { createSession, type Session, type SessionOptions, type SessionState, type MessageWithParts } from "./session.js" // Config export { loadConfig, writeConfigTemplate, type Config, type ProviderConfig } from "./config.js" diff --git a/packages/kiana/src/provider.ts b/packages/kiana/src/provider.ts index 09645772689..8f2734cb7db 100644 --- a/packages/kiana/src/provider.ts +++ b/packages/kiana/src/provider.ts @@ -30,10 +30,14 @@ export function createLanguageModel(config: ProviderConfig): LanguageModel { if (!config.baseUrl) { throw new Error("baseUrl is required for openai-compatible provider") } + // Use openai-compatible provider for APIs like Ark, Moonshot, etc. 
+ // includeUsage: true sends stream_options.include_usage to get token counts in streaming mode + // (requires provider support for OpenAI's streaming usage extension) const compatible = createOpenAICompatible({ name: "openai-compatible", apiKey: config.apiKey, baseURL: config.baseUrl, + includeUsage: true, }) return compatible(config.model) } diff --git a/packages/kiana/src/session.ts b/packages/kiana/src/session.ts index 012bd271f1c..388b31a085a 100644 --- a/packages/kiana/src/session.ts +++ b/packages/kiana/src/session.ts @@ -1,6 +1,8 @@ -import { streamText, type LanguageModel, tool, jsonSchema } from "ai" +import { streamText, generateText, type LanguageModel, tool, jsonSchema } from "ai" import { z } from "zod" import { spawn } from "node:child_process" +import * as fs from "node:fs" +import * as path from "node:path" import { EventBus, type EventTypes } from "./event.js" import { Config } from "./config.js" import { createLanguageModel } from "./provider.js" @@ -10,6 +12,39 @@ import type { SessionInfo } from "./types/session.js" import type { MessageInfo } from "./types/message.js" import type { Part, TextPart, ToolPart, ReasoningPart, StepStartPart, StepFinishPart } from "./types/part.js" +// Session storage for persistence +interface SessionStorage { + sessionDir: string + save(session: SessionInfo, messages: MessageWithParts[]): void + load(): { session: SessionInfo; messages: MessageWithParts[] } | null +} + +function createSessionStorage(sessionDir: string): SessionStorage { + const sessionFile = path.join(sessionDir, "session.json") + const messagesFile = path.join(sessionDir, "messages.json") + + return { + sessionDir, + save(session: SessionInfo, messages: MessageWithParts[]) { + fs.mkdirSync(sessionDir, { recursive: true }) + fs.writeFileSync(sessionFile, JSON.stringify(session, null, 2)) + fs.writeFileSync(messagesFile, JSON.stringify(messages, null, 2)) + }, + load() { + if (!fs.existsSync(sessionFile) || !fs.existsSync(messagesFile)) { + return 
null + } + try { + const session = JSON.parse(fs.readFileSync(sessionFile, "utf-8")) as SessionInfo + const messages = JSON.parse(fs.readFileSync(messagesFile, "utf-8")) as MessageWithParts[] + return { session, messages } + } catch { + return null + } + }, + } +} + // ID generation let idCounter = 0 function generateId(prefix: string): string { @@ -29,6 +64,7 @@ interface SessionState { abortController: AbortController eventBus: EventBus workingDirectory: string + storage: SessionStorage | null } interface MessageWithParts { @@ -36,6 +72,10 @@ interface MessageWithParts { parts: Part[] } +export interface SessionOptions { + sessionDir?: string // Directory for session persistence +} + export interface Session { id: string sendMessage(text: string): Promise @@ -44,9 +84,14 @@ export interface Session { abort(): void } -export async function createSession(config: Config): Promise { - const sessionID = generateId("session") - const projectID = generateId("project") +export async function createSession(config: Config, options?: SessionOptions): Promise { + const storage = options?.sessionDir ? createSessionStorage(options.sessionDir) : null + + // Try to load existing session + const existing = storage?.load() + + const sessionID = existing?.session.id ?? generateId("session") + const projectID = existing?.session.projectID ?? generateId("project") const model = createLanguageModel(config.provider) const tools = getTools(config.tools) @@ -60,10 +105,29 @@ export async function createSession(config: Config): Promise { config, model, tools, - messages: [], + messages: existing?.messages ?? 
[], abortController: new AbortController(), eventBus, workingDirectory, + storage, + } + + // Helper to save session state + function saveSession() { + if (state.storage) { + const sessionInfo: SessionInfo = { + id: state.id, + projectID: state.projectID, + directory: state.workingDirectory, + title: "Session", + version: "1.0.0", + time: { + created: Date.now(), + updated: Date.now(), + }, + } + state.storage.save(sessionInfo, state.messages) + } } // Setup subagent executor for Task tool @@ -165,6 +229,9 @@ export async function createSession(config: Config): Promise { // Run agent loop await runAgentLoop(state) + + // Save session after message completes + saveSession() } function onEvent(callback: (event: EventTypes) => void): () => void { @@ -197,6 +264,52 @@ export async function createSession(config: Config): Promise { } } +/** + * Update message tokens from usage data. + * Handles both AI SDK v6 nested format and older flat format. + */ +function updateTokensFromUsage(message: MessageInfo, usage: any): void { + if (!usage) return + + // AI SDK v6 with OpenAI provider uses nested format: + // { inputTokens: { total, noCache, cacheRead, cacheWrite }, outputTokens: { total, text, reasoning } } + // Older/simpler format: { inputTokens: number, outputTokens: number, cachedInputTokens: number } + let inputTotal = 0 + let outputTotal = 0 + let cachedRead = 0 + let reasoning = 0 + + if (typeof usage.inputTokens === "object" && usage.inputTokens !== null) { + // New nested format from AI SDK v6 OpenAI provider + inputTotal = usage.inputTokens.total ?? 0 + cachedRead = usage.inputTokens.cacheRead ?? 0 + } else { + // Old flat format + inputTotal = usage.inputTokens ?? usage.promptTokens ?? 0 + cachedRead = usage.cachedInputTokens ?? 0 + } + + if (typeof usage.outputTokens === "object" && usage.outputTokens !== null) { + // New nested format + outputTotal = usage.outputTokens.total ?? 0 + reasoning = usage.outputTokens.reasoning ?? 
0 + } else { + // Old flat format + outputTotal = usage.outputTokens ?? usage.completionTokens ?? 0 + reasoning = usage.reasoningTokens ?? 0 + } + + ;(message as any).tokens = { + input: inputTotal - cachedRead, + output: outputTotal, + reasoning: reasoning, + cache: { + read: cachedRead, + write: 0, // Provider-specific + }, + } +} + async function runAgentLoop(state: SessionState): Promise { const { eventBus, config, model, tools, workingDirectory } = state @@ -272,235 +385,297 @@ async function runAgentLoop(state: SessionState): Promise { properties: { part: stepStartPart }, }) - // Stream the response - let currentTextPart: TextPart | undefined - let currentReasoningPart: ReasoningPart | undefined - const toolParts: Record = {} - - const stream = streamText({ - model, - system: systemPrompt, - messages: aiMessages, - tools: aiTools, - maxRetries: 0, - abortSignal: state.abortController.signal, - // Repair malformed tool calls - following OpenCode's approach - experimental_repairToolCall: async ({ toolCall, error }) => { - // First, try to repair common JSON errors from LLMs - const repairedJson = tryRepairJson(toolCall.input) - if (repairedJson !== null) { - // Successfully repaired - return the fixed tool call - return { - type: "tool-call" as const, - toolCallId: toolCall.toolCallId, - toolName: toolCall.toolName, - input: JSON.stringify(repairedJson), - } - } - - // Try to repair case-sensitivity issues (e.g., "Read" -> "read") - const lower = toolCall.toolName.toLowerCase() - if (lower !== toolCall.toolName && tools[lower]) { - return { - ...toolCall, - toolName: lower, - } - } - - // Could not repair - redirect to "invalid" tool so LLM can see the error - // and retry with corrected arguments - return { - ...toolCall, - input: JSON.stringify({ - tool: toolCall.toolName, - error: error.message, - }), - toolName: "invalid", - } - }, - }) + // Check if streaming is enabled (default: true) + const useStreaming = config.streaming !== false + + if (useStreaming) 
{ + // Streaming mode + let currentTextPart: TextPart | undefined + let currentReasoningPart: ReasoningPart | undefined + const toolParts: Record = {} + + const stream = streamText({ + model, + system: systemPrompt, + messages: aiMessages, + tools: aiTools, + maxRetries: config.maxRetries, // Retry with exponential backoff for rate limit errors + abortSignal: state.abortController.signal, + }) - for await (const chunk of stream.fullStream) { - state.abortController.signal.throwIfAborted() + for await (const chunk of stream.fullStream) { + state.abortController.signal.throwIfAborted() + + switch (chunk.type) { + case "text-start": + currentTextPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "text", + text: "", + time: { start: Date.now() }, + } + assistantParts.push(currentTextPart) + break + + case "text-delta": + if (currentTextPart) { + currentTextPart.text += chunk.text + eventBus.emit({ + type: "message.part.updated", + properties: { part: currentTextPart, delta: chunk.text }, + }) + } + break - switch (chunk.type) { - case "text-start": - currentTextPart = { - id: generateId("part"), - sessionID: state.id, - messageID: assistantMessageID, - type: "text", - text: "", - time: { start: Date.now() }, - } - assistantParts.push(currentTextPart) - break + case "text-end": + if (currentTextPart) { + currentTextPart.text = currentTextPart.text.trimEnd() + currentTextPart.time = { + ...currentTextPart.time!, + end: Date.now(), + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: currentTextPart }, + }) + currentTextPart = undefined + } + break + + case "reasoning-start": + currentReasoningPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "reasoning", + text: "", + time: { start: Date.now() }, + } + assistantParts.push(currentReasoningPart) + break + + case "reasoning-delta": + if (currentReasoningPart) { + currentReasoningPart.text += chunk.text + 
eventBus.emit({ + type: "message.part.updated", + properties: { part: currentReasoningPart, delta: chunk.text }, + }) + } + break - case "text-delta": - if (currentTextPart) { - currentTextPart.text += chunk.text - eventBus.emit({ - type: "message.part.updated", - properties: { part: currentTextPart, delta: chunk.text }, - }) - } - break - - case "text-end": - if (currentTextPart) { - currentTextPart.text = currentTextPart.text.trimEnd() - currentTextPart.time = { - ...currentTextPart.time!, - end: Date.now(), + case "reasoning-end": + if (currentReasoningPart) { + currentReasoningPart.text = currentReasoningPart.text.trimEnd() + currentReasoningPart.time = { + ...currentReasoningPart.time!, + end: Date.now(), + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: currentReasoningPart }, + }) + currentReasoningPart = undefined } + break + + case "tool-input-start": { + const toolPart: ToolPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "tool", + tool: chunk.toolName, + callID: chunk.id, + state: { + status: "pending", + input: {}, + raw: "", + }, + } + assistantParts.push(toolPart) + toolParts[chunk.id] = toolPart eventBus.emit({ type: "message.part.updated", - properties: { part: currentTextPart }, + properties: { part: toolPart }, }) - currentTextPart = undefined - } - break - - case "reasoning-start": - currentReasoningPart = { - id: generateId("part"), - sessionID: state.id, - messageID: assistantMessageID, - type: "reasoning", - text: "", - time: { start: Date.now() }, + break } - assistantParts.push(currentReasoningPart) - break - case "reasoning-delta": - if (currentReasoningPart) { - currentReasoningPart.text += chunk.text - eventBus.emit({ - type: "message.part.updated", - properties: { part: currentReasoningPart, delta: chunk.text }, - }) - } - break - - case "reasoning-end": - if (currentReasoningPart) { - currentReasoningPart.text = currentReasoningPart.text.trimEnd() - 
currentReasoningPart.time = { - ...currentReasoningPart.time!, - end: Date.now(), + case "tool-call": { + const toolPart = toolParts[chunk.toolCallId] + if (toolPart) { + toolPart.state = { + status: "running", + input: (chunk as any).input as Record ?? {}, + time: { start: Date.now() }, + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) } - eventBus.emit({ - type: "message.part.updated", - properties: { part: currentReasoningPart }, - }) - currentReasoningPart = undefined + break } - break - case "tool-input-start": { - const toolPart: ToolPart = { - id: generateId("part"), - sessionID: state.id, - messageID: assistantMessageID, - type: "tool", - tool: chunk.toolName, - callID: chunk.id, - state: { - status: "pending", - input: {}, - raw: "", - }, + case "tool-result": { + const toolPart = toolParts[chunk.toolCallId] + if (toolPart && toolPart.state.status === "running") { + const output = chunk.output as ToolResult + toolPart.state = { + status: "completed", + input: (chunk as any).input as Record ?? {}, + output: output?.output ?? "", + title: output?.title ?? "", + metadata: output?.metadata ?? {}, + time: { + start: toolPart.state.time.start, + end: Date.now(), + }, + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) + } + break } - assistantParts.push(toolPart) - toolParts[chunk.id] = toolPart - eventBus.emit({ - type: "message.part.updated", - properties: { part: toolPart }, - }) - break - } - case "tool-call": { - const toolPart = toolParts[chunk.toolCallId] - if (toolPart) { - toolPart.state = { - status: "running", - input: (chunk as any).input as Record ?? {}, - time: { start: Date.now() }, + case "tool-error": { + const toolPart = toolParts[chunk.toolCallId] + if (toolPart && toolPart.state.status === "running") { + toolPart.state = { + status: "error", + input: (chunk as any).input as Record ?? 
{}, + error: String(chunk.error), + time: { + start: toolPart.state.time.start, + end: Date.now(), + }, + } + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) } - eventBus.emit({ - type: "message.part.updated", - properties: { part: toolPart }, - }) + break } - break - } - case "tool-result": { - const toolPart = toolParts[chunk.toolCallId] - if (toolPart && toolPart.state.status === "running") { - const output = chunk.output as ToolResult - toolPart.state = { - status: "completed", - input: (chunk as any).input as Record ?? {}, - output: output?.output ?? "", - title: output?.title ?? "", - metadata: output?.metadata ?? {}, - time: { - start: toolPart.state.time.start, - end: Date.now(), - }, + case "finish-step": { + // Update message with usage + const usage = chunk.usage as any + updateTokensFromUsage(assistantMessage, usage) + ;(assistantMessage as any).finish = chunk.finishReason + + // Emit step finish + const stepFinishPart: StepFinishPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "step-finish", + reason: chunk.finishReason, + tokens: (assistantMessage as any).tokens, + cost: 0, } + assistantParts.push(stepFinishPart) eventBus.emit({ type: "message.part.updated", - properties: { part: toolPart }, + properties: { part: stepFinishPart }, }) + break } - break + + case "error": + throw chunk.error } + } - case "tool-error": { - const toolPart = toolParts[chunk.toolCallId] - if (toolPart && toolPart.state.status === "running") { - toolPart.state = { - status: "error", - input: (chunk as any).input as Record ?? 
{}, - error: String(chunk.error), - time: { - start: toolPart.state.time.start, - end: Date.now(), - }, + // After streaming completes, get final totalUsage for accurate token counts + // This is the accumulated usage across all steps, which is more accurate + // than per-step usage from finish-step events + try { + const totalUsage = await stream.totalUsage + if (totalUsage) { + updateTokensFromUsage(assistantMessage, totalUsage) + // Update the message event with final tokens + eventBus.emit({ + type: "message.updated", + properties: { message: assistantMessage }, + }) + } + } catch { + // totalUsage may not be available if stream was aborted + } + } else { + // Non-streaming mode - better for token counting with some providers + // Use onStepFinish callback to emit events in real-time as each step completes + const result = await generateText({ + model, + system: systemPrompt, + messages: aiMessages, + tools: aiTools, + maxRetries: config.maxRetries, // Retry with exponential backoff for rate limit errors + abortSignal: state.abortController.signal, + onStepFinish: async (step) => { + // Process text content for this step + if (step.text) { + const textPart: TextPart = { + id: generateId("part"), + sessionID: state.id, + messageID: assistantMessageID, + type: "text", + text: step.text.trimEnd(), + time: { start: Date.now(), end: Date.now() }, } + assistantParts.push(textPart) eventBus.emit({ type: "message.part.updated", - properties: { part: toolPart }, + properties: { part: textPart }, }) } - break - } - case "finish-step": { - // Update message with usage - const usage = chunk.usage as any - if (usage) { - ;(assistantMessage as any).tokens = { - input: usage.promptTokens || usage.inputTokens || 0, - output: usage.completionTokens || usage.outputTokens || 0, - reasoning: 0, - cache: { read: 0, write: 0 }, + // Process tool results - update existing tool parts to "completed" + // Note: "running" state was already emitted by onInputAvailable + for (const 
toolResult of step.toolResults ?? []) { + const toolPart = assistantParts.find( + (p): p is ToolPart => p.type === "tool" && p.callID === toolResult.toolCallId + ) + if (toolPart) { + const output = toolResult.output as ToolResult + toolPart.state = { + status: "completed", + input: toolPart.state.input, + output: output?.output ?? "", + title: output?.title ?? toolPart.tool, + metadata: output?.metadata ?? {}, + time: { + start: (toolPart.state as any).time?.start ?? Date.now(), + end: Date.now(), + }, + } + // Emit completed state + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) } } - ;(assistantMessage as any).finish = chunk.finishReason - // Emit step finish + // Emit step finish for this step const stepFinishPart: StepFinishPart = { id: generateId("part"), sessionID: state.id, messageID: assistantMessageID, type: "step-finish", - reason: chunk.finishReason, - tokens: (assistantMessage as any).tokens, + reason: step.finishReason, + tokens: { + input: step.usage?.inputTokens ?? 0, + output: step.usage?.outputTokens ?? 
0, + reasoning: 0, + cache: { read: 0, write: 0 }, + }, cost: 0, } assistantParts.push(stepFinishPart) @@ -508,12 +683,12 @@ async function runAgentLoop(state: SessionState): Promise { type: "message.part.updated", properties: { part: stepFinishPart }, }) - break - } + }, + }) - case "error": - throw chunk.error - } + // Update total token counts from usage + updateTokensFromUsage(assistantMessage, result.usage as any) + ;(assistantMessage as any).finish = result.finishReason } // Update message completion time @@ -605,7 +780,8 @@ function buildAIMessages(messages: MessageWithParts[]): any[] { type: "tool-call", toolCallId: toolPart.callID, toolName: toolPart.tool, - args: toolPart.state.input, + // AI SDK v6 expects input as stringified JSON + input: JSON.stringify(toolPart.state.input), }) } @@ -617,26 +793,30 @@ function buildAIMessages(messages: MessageWithParts[]): any[] { } // Add tool results as separate messages (AI SDK 6 format) + // All tool results go in a single tool message + const toolResultContent: any[] = [] for (const toolPart of allToolParts) { // Handle both completed and error states - const outputValue = - toolPart.state.status === "error" - ? `Error: ${(toolPart.state as any).error}` - : (toolPart.state as any).output ?? "" + const isError = toolPart.state.status === "error" + const outputValue = isError + ? `Error: ${(toolPart.state as any).error}` + : (toolPart.state as any).output ?? "" + toolResultContent.push({ + type: "tool-result", + toolCallId: toolPart.callID, + toolName: toolPart.tool, + output: { + type: isError ? 
"error-text" : "text", + value: String(outputValue), + }, + }) + } + + if (toolResultContent.length > 0) { aiMessages.push({ role: "tool", - content: [ - { - type: "tool-result", - toolCallId: toolPart.callID, - toolName: toolPart.tool, - output: { - type: "text", - value: outputValue, - }, - }, - ], + content: toolResultContent, }) } } else if (allTextParts.length > 0) { @@ -670,6 +850,36 @@ function buildAITools( aiTools[name] = tool({ description: toolDef.description, inputSchema: jsonSchema(schema as any), + // Called before tool execution - emit "running" state event + // In streaming mode, the part is already created by tool-input-start + // In non-streaming mode, this is where we create the part + async onInputAvailable({ input, toolCallId }) { + // Check if part already exists (streaming mode creates it in tool-input-start) + let toolPart = parts.find( + (p): p is ToolPart => p.type === "tool" && p.callID === toolCallId + ) + if (!toolPart) { + // Non-streaming mode: create the part + toolPart = { + id: generateId("part"), + sessionID: state.id, + messageID, + type: "tool", + tool: name, + callID: toolCallId, + state: { + status: "running", + input: input as Record, + time: { start: Date.now() }, + }, + } + parts.push(toolPart) + eventBus.emit({ + type: "message.part.updated", + properties: { part: toolPart }, + }) + } + }, async execute(args, options) { const ctx: ToolContext = { sessionID: state.id, @@ -902,53 +1112,6 @@ async function runCommand( }) } -/** - * Attempt to repair common JSON errors from LLMs. - * Returns the parsed object if successful, null if repair failed. 
- * - * Common errors handled: - * - Missing quote before property name: {"a":"b",c":"d"} -> {"a":"b","c":"d"} - * - Unquoted property names: {command:"ls"} -> {"command":"ls"} - * - Single quotes: {'key':'val'} -> {"key":"val"} - * - Trailing commas: {"a":1,} -> {"a":1} - */ -function tryRepairJson(input: string): Record | null { - // First try parsing as-is - try { - return JSON.parse(input) - } catch { - // Continue to repair attempts - } - - let repaired = input.trim() - - // Fix 1: Add missing quotes around unquoted property names - // {command:"value"} -> {"command":"value"} - repaired = repaired.replace(/([{,]\s*)([a-zA-Z_][a-zA-Z0-9_]*)\s*:/g, '$1"$2":') - - // Fix 2: Fix missing quote before property name after comma - // {"a":"b",c":"d"} -> {"a":"b","c":"d"} - repaired = repaired.replace(/,([a-zA-Z_][a-zA-Z0-9_]*)"/g, ',"$1"') - - // Fix 3: Replace single quotes with double quotes (careful with strings containing apostrophes) - // Only replace if it looks like JSON structure - if (repaired.includes("'")) { - repaired = repaired.replace(/'([^']*)'(\s*[,}\]:])/g, '"$1"$2') - } - - // Fix 4: Remove trailing commas before } or ] - repaired = repaired.replace(/,(\s*[}\]])/g, "$1") - - // Fix 5: Handle unescaped newlines in strings (replace with \n) - // This is a simple heuristic - may not work for all cases - repaired = repaired.replace(/:\s*"([^"]*)\n([^"]*)"/g, ':"$1\\n$2"') - - try { - return JSON.parse(repaired) - } catch { - return null - } -} // Export types export type { SessionState, MessageWithParts } diff --git a/packages/kiana/test-config.jsonc b/packages/kiana/test-config.jsonc index 44ba358860d..4c8aef3318e 100644 --- a/packages/kiana/test-config.jsonc +++ b/packages/kiana/test-config.jsonc @@ -5,5 +5,6 @@ "model": "ep-20250821060450-4bc6g", "baseUrl": "https://ark-ap-southeast.byteintl.net/api/v3" }, - "workingDirectory": "/Users/joohwi.lee/telnet2/opencode/packages/kiana" + "workingDirectory": 
"/Users/joohwi.lee/telnet2/opencode/packages/kiana", + "streaming": false } diff --git a/packages/kiana/tmpx/messages.json b/packages/kiana/tmpx/messages.json new file mode 100644 index 00000000000..66bc0a1c778 --- /dev/null +++ b/packages/kiana/tmpx/messages.json @@ -0,0 +1,591 @@ +[ + { + "info": { + "id": "message_mj169i600002", + "sessionID": "session_mj169i5z0000", + "role": "user", + "time": { + "created": 1765441461816 + } + }, + "parts": [ + { + "id": "part_mj169i600003", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169i600002", + "type": "text", + "text": "list files in src directory" + } + ] + }, + { + "info": { + "id": "message_mj169i610004", + "sessionID": "session_mj169i5z0000", + "role": "assistant", + "parentID": "message_mj169i600002", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765441461817, + "completed": 1765441463680 + }, + "cost": 0, + "tokens": { + "input": 5860, + "output": 31, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "tool-calls" + }, + "parts": [ + { + "id": "part_mj169ic80005", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169i610004", + "type": "step-start" + }, + { + "id": "part_mj169jld0006", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169i610004", + "type": "tool", + "tool": "list", + "callID": "list:0", + "state": { + "status": "completed", + "input": { + "path": "/Users/joohwi.lee/telnet2/opencode/packages/kiana/src" + }, + "output": "/Users/joohwi.lee/telnet2/opencode/packages/kiana/src/\n tool/\n bash.ts\n codesearch.ts\n edit.ts\n glob.ts\n grep.ts\n index.ts\n invalid.ts\n list.ts\n read.ts\n task.ts\n todo.ts\n tool.ts\n webfetch.ts\n websearch.ts\n write.ts\n types/\n event.ts\n index.ts\n message.ts\n part.ts\n session.ts\n util/\n ripgrep.ts\n cli.ts\n config.ts\n event.ts\n index.ts\n provider.ts\n session.ts\n", + "title": "src", + "metadata": { + "count": 27, + 
"truncated": false + }, + "time": { + "start": 1765441463665, + "end": 1765441463680 + } + } + }, + { + "id": "part_mj169jls0007", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169i610004", + "type": "step-finish", + "reason": "tool-calls", + "tokens": { + "input": 5860, + "output": 31, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + }, + { + "info": { + "id": "message_mj169jls0008", + "sessionID": "session_mj169i5z0000", + "role": "assistant", + "parentID": "message_mj169i600002", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765441463680, + "completed": 1765441466677 + }, + "cost": 0, + "tokens": { + "input": 6051, + "output": 114, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "stop" + }, + "parts": [ + { + "id": "part_mj169jol0009", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169jls0008", + "type": "step-start" + }, + { + "id": "part_mj169lx1000a", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169jls0008", + "type": "text", + "text": "The `src` directory contains:\n\n**Root files:**\n- `cli.ts`\n- `config.ts`\n- `event.ts`\n- `index.ts`\n- `provider.ts`\n- `session.ts`\n\n**Subdirectories:**\n- `tool/` - Contains various tool implementations (bash, codesearch, edit, glob, grep, list, read, task, todo, webfetch, websearch, write)\n- `types/` - TypeScript type definitions (event, index, message, part, session)\n- `util/` - Utility functions (ripgrep)", + "time": { + "start": 1765441466677, + "end": 1765441466677 + } + }, + { + "id": "part_mj169lx1000b", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169jls0008", + "type": "step-finish", + "reason": "stop", + "tokens": { + "input": 6051, + "output": 114, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + }, + { + "info": { + "id": "message_mj169seb0000", + "sessionID": 
"session_mj169i5z0000", + "role": "user", + "time": { + "created": 1765441475075 + } + }, + "parts": [ + { + "id": "part_mj169seb0001", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169seb0000", + "type": "text", + "text": "what tools are available in the tool directory?" + } + ] + }, + { + "info": { + "id": "message_mj169sec0002", + "sessionID": "session_mj169i5z0000", + "role": "assistant", + "parentID": "message_mj169seb0000", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765441475076, + "completed": 1765441479858 + }, + "cost": 0, + "tokens": { + "input": 6193, + "output": 161, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "stop" + }, + "parts": [ + { + "id": "part_mj169sh30003", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169sec0002", + "type": "step-start" + }, + { + "id": "part_mj169w350004", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169sec0002", + "type": "text", + "text": "The `src/tool/` directory contains the following tool implementations:\n\n- `bash.ts` - Executes bash commands\n- `codesearch.ts` - Code search functionality\n- `edit.ts` - File editing operations\n- `glob.ts` - File pattern matching\n- `grep.ts` - Content searching\n- `index.ts` - Tool index/exports\n- `invalid.ts` - Invalid tool handling\n- `list.ts` - Directory listing\n- `read.ts` - File reading\n- `task.ts` - Task management\n- `todo.ts` - Todo list management\n- `tool.ts` - Base tool interface\n- `webfetch.ts` - Web content fetching\n- `websearch.ts` - Web search functionality\n- `write.ts` - File writing operations", + "time": { + "start": 1765441479857, + "end": 1765441479857 + } + }, + { + "id": "part_mj169w360005", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj169sec0002", + "type": "step-finish", + "reason": "stop", + "tokens": { + "input": 6193, + "output": 161, + "reasoning": 0, + "cache": { + "read": 0, + 
"write": 0 + } + }, + "cost": 0 + } + ] + }, + { + "info": { + "id": "message_mj16f4sa0000", + "sessionID": "session_mj169i5z0000", + "role": "user", + "time": { + "created": 1765441724410 + } + }, + "parts": [ + { + "id": "part_mj16f4sa0001", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16f4sa0000", + "type": "text", + "text": "analyze this directory, please analyze what it is about" + } + ] + }, + { + "info": { + "id": "message_mj16f4sc0002", + "sessionID": "session_mj169i5z0000", + "role": "assistant", + "parentID": "message_mj16f4sa0000", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765441724412, + "completed": 1765441822754 + }, + "cost": 0, + "tokens": { + "input": 6390, + "output": 191, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "tool-calls" + }, + "parts": [ + { + "id": "part_mj16f4va0003", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16f4sc0002", + "type": "step-start" + }, + { + "id": "part_mj16f9m20004", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16f4sc0002", + "type": "tool", + "tool": "task", + "callID": "task:1", + "state": { + "status": "completed", + "input": { + "description": "Analyze kiana directory structure", + "prompt": "Analyze the entire kiana directory structure and codebase to understand what this project is about. Look at:\n\n1. package.json for project metadata and dependencies\n2. README or any documentation files\n3. Main entry points (index.ts, cli.ts) to understand the architecture\n4. Key source files to understand the core functionality\n5. 
The types and tool directories to understand the abstraction layers\n\nProvide a comprehensive analysis of:\n- What this project does\n- Its main purpose and functionality\n- The architecture and design patterns used\n- Key components and their roles\n- How the tools and types directories relate to the overall system\n- Any unique or interesting aspects of the codebase\n\nFocus on understanding the project's purpose and how it's structured rather than line-by-line code analysis.", + "subagent_type": "explore" + }, + "output": "No output from subagent\n\n\nsession_id: session_mj16f9m30005\n", + "title": "Analyze kiana directory structure", + "metadata": { + "sessionId": "session_mj16f9m30005", + "status": "completed" + }, + "time": { + "start": 1765441730666, + "end": 1765441822753 + } + } + }, + { + "id": "part_mj16h8o2001u", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16f4sc0002", + "type": "step-finish", + "reason": "tool-calls", + "tokens": { + "input": 6390, + "output": 191, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + }, + { + "info": { + "id": "message_mj16h8o2001v", + "sessionID": "session_mj169i5z0000", + "role": "assistant", + "parentID": "message_mj16f4sa0000", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765441822754, + "completed": 1765441825729 + }, + "cost": 0, + "tokens": { + "input": 6637, + "output": 97, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "tool-calls" + }, + "parts": [ + { + "id": "part_mj16h8r0001w", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16h8o2001v", + "type": "step-start" + }, + { + "id": "part_mj16hayn001x", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16h8o2001v", + "type": "tool", + "tool": "read", + "callID": "read:2", + "state": { + "status": "completed", + "input": { + "filePath": 
"/Users/joohwi.lee/telnet2/opencode/packages/kiana/package.json" + }, + "output": "\n 1\t{\n 2\t \"name\": \"kiana\",\n 3\t \"version\": \"0.1.0\",\n 4\t \"description\": \"Kiana - Minimal headless coding agent\",\n 5\t \"type\": \"module\",\n 6\t \"main\": \"./dist/index.js\",\n 7\t \"types\": \"./dist/index.d.ts\",\n 8\t \"bin\": {\n 9\t \"kiana\": \"./dist/cli.js\"\n 10\t },\n 11\t \"exports\": {\n 12\t \".\": {\n 13\t \"types\": \"./dist/index.d.ts\",\n 14\t \"import\": \"./dist/index.js\"\n 15\t }\n 16\t },\n 17\t \"scripts\": {\n 18\t \"build\": \"tsc\",\n 19\t \"dev\": \"tsc --watch\",\n 20\t \"typecheck\": \"tsc --noEmit\"\n 21\t },\n 22\t \"dependencies\": {\n 23\t \"ai\": \"beta\",\n 24\t \"@ai-sdk/anthropic\": \"beta\",\n 25\t \"@ai-sdk/openai\": \"beta\",\n 26\t \"@ai-sdk/openai-compatible\": \"beta\",\n 27\t \"@ai-sdk/google\": \"beta\",\n 28\t \"zod\": \"latest\",\n 29\t \"jsonc-parser\": \"latest\"\n 30\t },\n 31\t \"devDependencies\": {\n 32\t \"typescript\": \"latest\",\n 33\t \"@types/node\": \"latest\"\n 34\t },\n 35\t \"engines\": {\n 36\t \"node\": \">=20\"\n 37\t },\n 38\t \"license\": \"MIT\"\n 39\t}\n 40\t\n\n(End of file - total 40 lines)\n", + "title": "package.json", + "metadata": { + "preview": "{\n \"name\": \"kiana\",\n \"version\": \"0.1.0\",\n \"description\": \"Kiana - Minimal headless coding agent\",\n \"type\": \"module\",\n \"main\": \"./dist/index.js\",\n \"types\": \"./dist/index.d.ts\",\n \"bin\": {\n \"kiana\": \"./dist/cli.js\"\n },\n \"exports\": {\n \".\": {\n \"types\": \"./dist/index.d.ts\",\n \"import\": \"./dist/index.js\"\n }\n },\n \"scripts\": {\n \"build\": \"tsc\",\n \"dev\": \"tsc --watch\",\n \"typecheck\": \"tsc --noEmit\"" + }, + "time": { + "start": 1765441825727, + "end": 1765441825728 + } + } + }, + { + "id": "part_mj16hayn001y", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16h8o2001v", + "type": "tool", + "tool": "read", + "callID": "read:3", + "state": { + "status": "completed", + 
"input": { + "filePath": "/Users/joohwi.lee/telnet2/opencode/packages/kiana/src/index.ts" + }, + "output": "\n 1\t// Kiana - Minimal Headless Coding Agent Library\n 2\t// A standalone TypeScript package providing a headless coding agent library + CLI wrapper\n 3\t\n 4\t// Session API\n 5\texport { createSession, type Session, type SessionOptions, type SessionState, type MessageWithParts } from \"./session.js\"\n 6\t\n 7\t// Config\n 8\texport { loadConfig, writeConfigTemplate, type Config, type ProviderConfig } from \"./config.js\"\n 9\t\n 10\t// Provider\n 11\texport { createLanguageModel } from \"./provider.js\"\n 12\t\n 13\t// Event system\n 14\texport { EventBus, type EventTypes, type SessionEvent, type MessageEvent, type PartEvent, type TodoEvent } from \"./event.js\"\n 15\t\n 16\t// Types\n 17\texport type { SessionInfo } from \"./types/session.js\"\n 18\texport type { MessageInfo } from \"./types/message.js\"\n 19\texport type {\n 20\t Part,\n 21\t TextPart,\n 22\t ReasoningPart,\n 23\t ToolPart,\n 24\t ToolState,\n 25\t FilePart,\n 26\t StepStartPart,\n 27\t StepFinishPart,\n 28\t SnapshotPart,\n 29\t PatchPart,\n 30\t AgentPart,\n 31\t RetryPart,\n 32\t CompactionPart,\n 33\t SubtaskPart,\n 34\t} from \"./types/part.js\"\n 35\t\n 36\t// Tools\n 37\texport {\n 38\t allTools,\n 39\t getTools,\n 40\t getToolInfo,\n 41\t defineTool,\n 42\t type Tool,\n 43\t type ToolContext,\n 44\t type ToolResult,\n 45\t type ToolInfo,\n 46\t} from \"./tool/index.js\"\n 47\t\n 48\t// Individual tools (for advanced usage)\n 49\texport { bashTool } from \"./tool/bash.js\"\n 50\texport { readTool } from \"./tool/read.js\"\n 51\texport { writeTool } from \"./tool/write.js\"\n 52\texport { editTool } from \"./tool/edit.js\"\n 53\texport { globTool } from \"./tool/glob.js\"\n 54\texport { grepTool } from \"./tool/grep.js\"\n 55\texport { listTool } from \"./tool/list.js\"\n 56\texport { webfetchTool } from \"./tool/webfetch.js\"\n 57\texport { websearchTool } from 
\"./tool/websearch.js\"\n 58\texport { codesearchTool } from \"./tool/codesearch.js\"\n 59\texport { todoWriteTool, todoReadTool, getTodos, setTodos } from \"./tool/todo.js\"\n 60\texport { taskTool, setSubagentExecutor } from \"./tool/task.js\"\n 61\t\n 62\t// Version\n 63\texport const VERSION = \"0.1.0\"\n 64\t\n\n(End of file - total 64 lines)\n", + "title": "src/index.ts", + "metadata": { + "preview": "// Kiana - Minimal Headless Coding Agent Library\n// A standalone TypeScript package providing a headless coding agent library + CLI wrapper\n\n// Session API\nexport { createSession, type Session, type SessionOptions, type SessionState, type MessageWithParts } from \"./session.js\"\n\n// Config\nexport { loadConfig, writeConfigTemplate, type Config, type ProviderConfig } from \"./config.js\"\n\n// Provider\nexport { createLanguageModel } from \"./provider.js\"\n\n// Event system\nexport { EventBus, type EventTypes, type SessionEvent, type MessageEvent, type PartEvent, type TodoEvent } from \"./event.js\"\n\n// Types\nexport type { SessionInfo } from \"./types/session.js\"\nexport type { MessageInfo } from \"./types/message.js\"\nexport type {\n Part," + }, + "time": { + "start": 1765441825727, + "end": 1765441825728 + } + } + }, + { + "id": "part_mj16hayn001z", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16h8o2001v", + "type": "tool", + "tool": "read", + "callID": "read:4", + "state": { + "status": "completed", + "input": { + "filePath": "/Users/joohwi.lee/telnet2/opencode/packages/kiana/src/cli.ts" + }, + "output": "\n 1\t#!/usr/bin/env node\n 2\t\n 3\timport * as fs from \"node:fs\"\n 4\timport * as path from \"node:path\"\n 5\timport * as readline from \"node:readline\"\n 6\timport { createSession } from \"./session.js\"\n 7\timport { loadConfig, writeConfigTemplate } from \"./config.js\"\n 8\timport { VERSION } from \"./index.js\"\n 9\t\n 10\tinterface CliArgs {\n 11\t config?: string\n 12\t session?: string\n 13\t cwd?: string\n 14\t 
createConfig?: string\n 15\t humanReadable?: boolean\n 16\t verbose?: boolean\n 17\t prompt?: string\n 18\t help?: boolean\n 19\t version?: boolean\n 20\t}\n 21\t\n 22\tfunction parseArgs(args: string[]): CliArgs {\n 23\t const result: CliArgs = {}\n 24\t\n 25\t for (let i = 0; i < args.length; i++) {\n 26\t const arg = args[i]\n 27\t\n 28\t if (arg === \"--config\" || arg === \"-c\") {\n 29\t result.config = args[++i]\n 30\t } else if (arg === \"--session\" || arg === \"-s\") {\n 31\t result.session = args[++i]\n 32\t } else if (arg === \"--cwd\") {\n 33\t result.cwd = args[++i]\n 34\t } else if (arg === \"--create-config\") {\n 35\t result.createConfig = args[++i]\n 36\t } else if (arg === \"--human-readable\" || arg === \"-H\") {\n 37\t result.humanReadable = true\n 38\t } else if (arg === \"--prompt\" || arg === \"-p\") {\n 39\t result.prompt = args[++i]\n 40\t } else if (arg === \"--verbose\") {\n 41\t result.verbose = true\n 42\t } else if (arg === \"--help\" || arg === \"-h\") {\n 43\t result.help = true\n 44\t } else if (arg === \"--version\" || arg === \"-V\") {\n 45\t result.version = true\n 46\t }\n 47\t }\n 48\t\n 49\t return result\n 50\t}\n 51\t\n 52\tfunction printHelp(): void {\n 53\t console.log(`\n 54\tKiana - Minimal Headless Coding Agent\n 55\t\n 56\tUsage:\n 57\t kiana --config [options]\n 58\t kiana --create-config \n 59\t\n 60\tOptions:\n 61\t --config, -c Path to JSONC config file (required)\n 62\t --prompt, -p Initial prompt to send (exits after completion)\n 63\t --session, -s

Directory to persist session\n 64\t --cwd Working directory for tools\n 65\t --human-readable, -H Output in human-readable format (streaming text)\n 66\t --verbose Show detailed tool inputs/outputs (with -H)\n 67\t --create-config Write config template and exit\n 68\t --help, -h Show this help message\n 69\t --version, -V Show version\n 70\t\n 71\tStdin/Stdout Protocol:\n 72\t Input (stdin): Line-delimited JSON commands\n 73\t {\"type\": \"message\", \"text\": \"Fix the bug in auth.ts\"}\n 74\t {\"type\": \"abort\"}\n 75\t\n 76\t Output (stdout): Line-delimited JSON events\n 77\t {\"type\": \"session.created\", \"properties\": {...}}\n 78\t {\"type\": \"message.part.updated\", \"properties\": {...}}\n 79\t\n 80\tExample:\n 81\t kiana --config ./kiana.jsonc\n 82\t echo '{\"type\":\"message\",\"text\":\"Hello\"}' | kiana --config ./kiana.jsonc\n 83\t`)\n 84\t}\n 85\t\n 86\tfunction printVersion(): void {\n 87\t console.log(`kiana v${VERSION}`)\n 88\t}\n 89\t\n 90\tinterface StdinCommand {\n 91\t type: \"message\" | \"abort\"\n 92\t text?: string\n 93\t}\n 94\t\n 95\t// ANSI color codes\n 96\tconst colors = {\n 97\t reset: \"\\x1b[0m\",\n 98\t bold: \"\\x1b[1m\",\n 99\t dim: \"\\x1b[2m\",\n 100\t cyan: \"\\x1b[36m\",\n 101\t green: \"\\x1b[32m\",\n 102\t yellow: \"\\x1b[33m\",\n 103\t blue: \"\\x1b[34m\",\n 104\t magenta: \"\\x1b[35m\",\n 105\t red: \"\\x1b[31m\",\n 106\t gray: \"\\x1b[90m\",\n 107\t}\n 108\t\n 109\t// Track tool calls that have already shown their \"running\" header\n 110\tconst seenRunningTools = new Set()\n 111\t\n 112\t// Human-readable event formatter\n 113\tfunction formatEventHumanReadable(event: any, verbose: boolean = false): string | null {\n 114\t const indent = event.context ? \" \".repeat(event.context.depth) : \"\"\n 115\t const agentPrefix = event.context\n 116\t ? 
`${colors.magenta}[${event.context.agentType || \"subagent\"}]${colors.reset} `\n 117\t : \"\"\n 118\t\n 119\t switch (event.type) {\n 120\t case \"session.created\":\n 121\t return `${colors.dim}Session started: ${event.properties.session.id}${colors.reset}\\n`\n 122\t\n 123\t case \"session.idle\":\n 124\t return `${colors.dim}--- Session idle ---${colors.reset}\\n`\n 125\t\n 126\t case \"message.created\":\n 127\t if (event.properties.message.role === \"user\") {\n 128\t return null // User message text is shown via part.updated\n 129\t }\n 130\t return null // Don't show assistant message creation\n 131\t\n 132\t case \"message.part.updated\": {\n 133\t const part = event.properties.part\n 134\t const delta = event.properties.delta\n 135\t\n 136\t if (part.type === \"text\") {\n 137\t if (delta) {\n 138\t // Streaming text delta\n 139\t return `${indent}${agentPrefix}${delta}`\n 140\t }\n 141\t // Non-streaming: print full text\n 142\t if (part.text) {\n 143\t return `${indent}${agentPrefix}${part.text}\\n`\n 144\t }\n 145\t return null\n 146\t }\n 147\t\n 148\t if (part.type === \"reasoning\") {\n 149\t if (delta) {\n 150\t return `${indent}${agentPrefix}${colors.dim}${delta}${colors.reset}`\n 151\t }\n 152\t return null\n 153\t }\n 154\t\n 155\t if (part.type === \"tool\") {\n 156\t const state = part.state\n 157\t if (state.status === \"running\") {\n 158\t // Only show the \"running\" header once per tool call\n 159\t const toolKey = `${part.id}:${part.callID}`\n 160\t if (seenRunningTools.has(toolKey)) {\n 161\t return null // Already shown\n 162\t }\n 163\t seenRunningTools.add(toolKey)\n 164\t\n 165\t const toolName = part.tool\n 166\t const title = state.title || formatToolInput(toolName, state.input)\n 167\t let output = `\\n${indent}${agentPrefix}${colors.cyan}▶ ${toolName}${colors.reset} ${colors.gray}${title}${colors.reset}\\n`\n 168\t // In verbose mode, show the full input\n 169\t if (verbose && state.input && Object.keys(state.input).length > 0) 
{\n 170\t const inputStr = JSON.stringify(state.input, null, 2)\n 171\t .split(\"\\n\")\n 172\t .map((line) => `${indent} ${colors.dim}${line}${colors.reset}`)\n 173\t .join(\"\\n\")\n 174\t output += `${inputStr}\\n`\n 175\t }\n 176\t return output\n 177\t }\n 178\t if (state.status === \"completed\") {\n 179\t const title = state.title || part.tool\n 180\t // Check if this is actually an error wrapped as a completed result\n 181\t const isError = state.metadata?.error || state.metadata?.validationError || state.metadata?.parseError\n 182\t const icon = isError ? \"✗\" : \"✓\"\n 183\t const color = isError ? colors.red : colors.green\n 184\t let output = `${indent}${agentPrefix}${color}${icon} ${title}${colors.reset}\\n`\n 185\t // In verbose mode, show the output (truncated)\n 186\t if (verbose && state.output) {\n 187\t const outputLines = String(state.output).split(\"\\n\").slice(0, 10)\n 188\t if (String(state.output).split(\"\\n\").length > 10) {\n 189\t outputLines.push(\"... (truncated)\")\n 190\t }\n 191\t const outputStr = outputLines\n 192\t .map((line) => `${indent} ${colors.dim}${line}${colors.reset}`)\n 193\t .join(\"\\n\")\n 194\t output += `${outputStr}\\n`\n 195\t }\n 196\t return output\n 197\t }\n 198\t if (state.status === \"error\") {\n 199\t let errorOutput = `${indent}${agentPrefix}${colors.red}✗ ${part.tool}: ${state.error}${colors.reset}\\n`\n 200\t // Always show the input that caused the error for debugging\n 201\t if (state.input && Object.keys(state.input).length > 0) {\n 202\t const inputStr = JSON.stringify(state.input, null, 2)\n 203\t .split(\"\\n\")\n 204\t .map((line) => `${indent} ${colors.dim}${line}${colors.reset}`)\n 205\t .join(\"\\n\")\n 206\t errorOutput += `${inputStr}\\n`\n 207\t }\n 208\t return errorOutput\n 209\t }\n 210\t return null\n 211\t }\n 212\t\n 213\t if (part.type === \"step-start\") {\n 214\t return null // Don't show step start\n 215\t }\n 216\t\n 217\t if (part.type === \"step-finish\") {\n 218\t return 
`\\n` // Add newline after step\n 219\t }\n 220\t\n 221\t return null\n 222\t }\n 223\t\n 224\t case \"message.updated\": {\n 225\t const msg = event.properties.message\n 226\t if (msg.error) {\n 227\t return `${colors.red}Error: ${msg.error.message || msg.error}${colors.reset}\\n`\n 228\t }\n 229\t return null\n 230\t }\n 231\t\n 232\t case \"todo.updated\": {\n 233\t const todos = event.properties.todos\n 234\t if (!todos || todos.length === 0) return null\n 235\t\n 236\t let output = `\\n${colors.yellow}📋 Todos:${colors.reset}\\n`\n 237\t for (const todo of todos) {\n 238\t const icon = todo.status === \"completed\" ? \"✓\" : todo.status === \"in_progress\" ? \"▶\" : \"○\"\n 239\t const color = todo.status === \"completed\" ? colors.green : todo.status === \"in_progress\" ? colors.cyan : colors.gray\n 240\t output += ` ${color}${icon} ${todo.content}${colors.reset}\\n`\n 241\t }\n 242\t return output\n 243\t }\n 244\t\n 245\t default:\n 246\t return null\n 247\t }\n 248\t}\n 249\t\n 250\tfunction formatToolInput(toolName: string, input: Record): string {\n 251\t switch (toolName) {\n 252\t case \"read\":\n 253\t return String(input.filePath || input.file_path || \"\")\n 254\t case \"write\":\n 255\t return String(input.filePath || input.file_path || \"\")\n 256\t case \"edit\":\n 257\t return String(input.filePath || input.file_path || \"\")\n 258\t case \"bash\":\n 259\t return String(input.command || \"\").slice(0, 50) + (String(input.command || \"\").length > 50 ? 
\"...\" : \"\")\n 260\t case \"glob\":\n 261\t return String(input.pattern || \"\")\n 262\t case \"grep\":\n 263\t return String(input.pattern || \"\")\n 264\t case \"list\":\n 265\t return String(input.path || \".\")\n 266\t case \"task\":\n 267\t return String(input.description || \"\")\n 268\t default:\n 269\t return \"\"\n 270\t }\n 271\t}\n 272\t\n 273\tasync function main(): Promise {\n 274\t const args = parseArgs(process.argv.slice(2))\n 275\t\n 276\t if (args.help) {\n 277\t printHelp()\n 278\t process.exit(0)\n 279\t }\n 280\t\n 281\t if (args.version) {\n 282\t printVersion()\n 283\t process.exit(0)\n 284\t }\n 285\t\n 286\t if (args.createConfig) {\n 287\t const configPath = path.resolve(args.createConfig)\n 288\t writeConfigTemplate(configPath)\n 289\t console.error(`Config template written to: ${configPath}`)\n 290\t process.exit(0)\n 291\t }\n 292\t\n 293\t if (!args.config) {\n 294\t console.error(\"Error: --config is required\")\n 295\t console.error(\"Use --help for usage information\")\n 296\t process.exit(1)\n 297\t }\n 298\t\n 299\t const configPath = path.resolve(args.config)\n 300\t\n 301\t if (!fs.existsSync(configPath)) {\n 302\t console.error(`Error: Config file not found: ${configPath}`)\n 303\t process.exit(1)\n 304\t }\n 305\t\n 306\t // Load config\n 307\t const config = loadConfig(configPath)\n 308\t\n 309\t // Override working directory if specified\n 310\t if (args.cwd) {\n 311\t config.workingDirectory = path.resolve(args.cwd)\n 312\t }\n 313\t\n 314\t // Create session with optional persistence\n 315\t const sessionDir = args.session ? path.resolve(args.session) : undefined\n 316\t const session = await createSession(config, { sessionDir })\n 317\t\n 318\t // Subscribe to events and output\n 319\t if (args.humanReadable) {\n 320\t const verbose = args.verbose ?? 
false\n 321\t session.onEvent((event) => {\n 322\t const formatted = formatEventHumanReadable(event, verbose)\n 323\t if (formatted) {\n 324\t process.stdout.write(formatted)\n 325\t }\n 326\t })\n 327\t } else {\n 328\t session.onEvent((event) => {\n 329\t console.log(JSON.stringify(event))\n 330\t })\n 331\t }\n 332\t\n 333\t // If --prompt is provided, send it and exit after completion\n 334\t if (args.prompt) {\n 335\t await session.sendMessage(args.prompt)\n 336\t process.exit(0)\n 337\t }\n 338\t\n 339\t // Otherwise, read commands from stdin\n 340\t let pendingMessage: Promise | null = null\n 341\t let stdinClosed = false\n 342\t\n 343\t const rl = readline.createInterface({\n 344\t input: process.stdin,\n 345\t output: undefined,\n 346\t terminal: false,\n 347\t })\n 348\t\n 349\t rl.on(\"line\", async (line) => {\n 350\t if (!line.trim()) return\n 351\t\n 352\t try {\n 353\t const command: StdinCommand = JSON.parse(line)\n 354\t\n 355\t switch (command.type) {\n 356\t case \"message\":\n 357\t if (command.text) {\n 358\t pendingMessage = session.sendMessage(command.text)\n 359\t await pendingMessage\n 360\t pendingMessage = null\n 361\t // If stdin already closed, exit after message completes\n 362\t if (stdinClosed) {\n 363\t process.exit(0)\n 364\t }\n 365\t }\n 366\t break\n 367\t\n 368\t case \"abort\":\n 369\t session.abort()\n 370\t break\n 371\t\n 372\t default:\n 373\t console.error(JSON.stringify({ error: `Unknown command type: ${(command as any).type}` }))\n 374\t }\n 375\t } catch (error) {\n 376\t console.error(\n 377\t JSON.stringify({\n 378\t error: `Failed to parse command: ${error instanceof Error ? 
error.message : String(error)}`,\n 379\t })\n 380\t )\n 381\t }\n 382\t })\n 383\t\n 384\t rl.on(\"close\", async () => {\n 385\t stdinClosed = true\n 386\t // Wait for any pending message to complete before exiting\n 387\t if (pendingMessage) {\n 388\t await pendingMessage\n 389\t }\n 390\t process.exit(0)\n 391\t })\n 392\t\n 393\t // Handle signals\n 394\t process.on(\"SIGINT\", () => {\n 395\t session.abort()\n 396\t process.exit(0)\n 397\t })\n 398\t\n 399\t process.on(\"SIGTERM\", () => {\n 400\t session.abort()\n 401\t process.exit(0)\n 402\t })\n 403\t}\n 404\t\n 405\tmain().catch((error) => {\n 406\t console.error(`Fatal error: ${error instanceof Error ? error.message : String(error)}`)\n 407\t process.exit(1)\n 408\t})\n 409\t\n\n(End of file - total 409 lines)\n", + "title": "src/cli.ts", + "metadata": { + "preview": "#!/usr/bin/env node\n\nimport * as fs from \"node:fs\"\nimport * as path from \"node:path\"\nimport * as readline from \"node:readline\"\nimport { createSession } from \"./session.js\"\nimport { loadConfig, writeConfigTemplate } from \"./config.js\"\nimport { VERSION } from \"./index.js\"\n\ninterface CliArgs {\n config?: string\n session?: string\n cwd?: string\n createConfig?: string\n humanReadable?: boolean\n verbose?: boolean\n prompt?: string\n help?: boolean\n version?: boolean\n}" + }, + "time": { + "start": 1765441825727, + "end": 1765441825728 + } + } + }, + { + "id": "part_mj16hayo0020", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16h8o2001v", + "type": "step-finish", + "reason": "tool-calls", + "tokens": { + "input": 6637, + "output": 97, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + }, + { + "info": { + "id": "message_mj16hayp0021", + "sessionID": "session_mj169i5z0000", + "role": "assistant", + "parentID": "message_mj16f4sa0000", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765441825729, + "completed": 
1765441841204 + }, + "cost": 0, + "tokens": { + "input": 12157, + "output": 732, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "stop" + }, + "parts": [ + { + "id": "part_mj16hb1d0022", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16hayp0021", + "type": "step-start" + }, + { + "id": "part_mj16hmwj0023", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16hayp0021", + "type": "text", + "text": "## Analysis: Kiana - Minimal Headless Coding Agent\n\n**Kiana** is a minimal, headless coding agent designed as a standalone TypeScript package that provides both a **library API** and a **CLI wrapper**. Based on my analysis, here's what this project is about:\n\n### Core Purpose\nKiana is a lightweight AI-powered coding assistant that operates in a headless (non-interactive) mode. It provides a framework for AI agents to perform software engineering tasks using a comprehensive set of development tools.\n\n### Architecture Overview\n\n**1. Dual Interface Design:**\n- **Library API**: Exportable functions for programmatic use\n- **CLI Tool**: Command-line interface for direct usage\n\n**2. Event-Driven Architecture:**\n- Uses an event bus system for real-time communication\n- Supports streaming responses and tool execution updates\n- JSON-based protocol for stdin/stdout communication\n\n**3. 
Modular Tool System:**\n- **15 specialized tools** organized in `src/tool/`:\n - File operations: `read`, `write`, `edit`, `list`, `glob`\n - Code analysis: `grep`, `codesearch`\n - System operations: `bash`\n - Web operations: `webfetch`, `websearch`\n - Task management: `task`, `todo`\n - Development utilities: All tools have proper TypeScript typing\n\n### Key Components\n\n**Session Management (`src/session.ts`):**\n- Creates and manages AI agent sessions\n- Handles message persistence and state management\n- Supports session directories for continuity\n\n**Provider System (`src/provider.ts`):**\n- Multi-provider support (OpenAI, Anthropic, Google)\n- Uses the `ai` SDK beta for language model integration\n- Configurable through JSONC configuration files\n\n**Event System (`src/event.ts`):**\n- Real-time event streaming\n- Supports different event types: session, message, part, todo\n- Human-readable formatting for CLI usage\n\n**Configuration (`src/config.ts`):**\n- JSONC-based configuration\n- Provider-specific settings\n- Working directory management\n\n### Unique Features\n\n1. **Headless Operation**: Designed for non-interactive, automated workflows\n2. **Streaming Protocol**: JSONL (JSON Lines) protocol for stdin/stdout\n3. **Tool Abstraction**: Clean separation between tools and AI logic\n4. **Subagent Support**: Hierarchical task delegation via the `task` tool\n5. **Session Persistence**: Optional state persistence across runs\n6. 
**Human-Readable Mode**: Optional colored CLI output for debugging\n\n### Usage Patterns\n\n**CLI Mode:**\n```bash\nkiana --config kiana.jsonc --prompt \"Fix the bug in auth.ts\"\n```\n\n**Library Mode:**\n```typescript\nimport { createSession, loadConfig } from 'kiana'\nconst session = await createSession(config)\nawait session.sendMessage(\"Implement user authentication\")\n```\n\n**Streaming Protocol:**\n```json\n// Input (stdin)\n{\"type\": \"message\", \"text\": \"List all TypeScript files\"}\n\n// Output (stdout)\n{\"type\": \"session.created\", \"properties\": {...}}\n{\"type\": \"message.part.updated\", \"properties\": {...}}\n```\n\n### Design Philosophy\nKiana embraces minimalism while providing comprehensive tooling. It's designed to be:\n- **Lightweight**: Minimal dependencies, focused functionality\n- **Composable**: Tools can be combined for complex workflows\n- **Extensible**: Easy to add new tools and providers\n- **Observable**: Rich event system for monitoring and debugging\n\nThis makes Kiana ideal for automated CI/CD pipelines, development workflows, or as a foundation for more complex AI-powered development tools.", + "time": { + "start": 1765441841203, + "end": 1765441841203 + } + }, + { + "id": "part_mj16hmwj0024", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16hayp0021", + "type": "step-finish", + "reason": "stop", + "tokens": { + "input": 12157, + "output": 732, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + }, + { + "info": { + "id": "message_mj16iygm0000", + "sessionID": "session_mj169i5z0000", + "role": "user", + "time": { + "created": 1765441902838 + } + }, + "parts": [ + { + "id": "part_mj16iygm0001", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16iygm0000", + "type": "text", + "text": "analyze this directory, please analyze what it is about" + } + ] + }, + { + "info": { + "id": "message_mj16iygo0002", + "sessionID": "session_mj169i5z0000", + "role": 
"assistant", + "parentID": "message_mj16iygm0000", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765441902840, + "completed": 1765441929511 + }, + "cost": 0, + "tokens": { + "input": 0, + "output": 0, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "stop" + }, + "parts": [ + { + "id": "part_mj16iyjz0003", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16iygo0002", + "type": "step-start" + }, + { + "id": "part_mj16j0mw0004", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16iygo0002", + "type": "text", + "text": "## Kiana: Minimal Headless Coding Agent - Comprehensive Analysis\n\nBased on my analysis of the codebase, **Kiana** is a sophisticated, minimal headless coding agent that provides both a **library API** and **CLI wrapper** for AI-powered software development tasks.\n\n### Core Identity\n**Kiana** positions itself as a \"minimal headless coding agent\" - a lightweight, non-interactive AI assistant designed for automated software development workflows. It's built as a standalone TypeScript package that can be used both programmatically and via command-line.\n\n### Architecture Overview\n\n#### 1. **Dual Interface Design**\n- **Library Mode**: Exportable TypeScript functions for integration into other applications\n- **CLI Mode**: Command-line interface with JSONL protocol for stdin/stdout communication\n\n#### 2. **Event-Driven Architecture**\nThe system uses a sophisticated event bus with real-time streaming:\n- **Session events**: Creation, idle states\n- **Message events**: User/assistant message handling\n- **Part events**: Streaming text, reasoning, tool execution\n- **Todo events**: Task management and progress tracking\n\n#### 3. 
**Modular Tool System** (15 specialized tools)\nThe `src/tool/` directory contains a comprehensive toolkit:\n\n**File Operations:**\n- `read.ts` - File reading with line limits\n- `write.ts` - File writing and creation\n- `edit.ts` - Precise string replacement editing\n- `list.ts` - Directory listing with glob filtering\n- `glob.ts` - Advanced file pattern matching\n\n**Code Analysis:**\n- `grep.ts` - Content searching with regex\n- `codesearch.ts` - Code-specific search functionality\n\n**System Operations:**\n- `bash.ts` - Command execution with timeout and security\n\n**Web Operations:**\n- `webfetch.ts` - HTTP content fetching\n- `websearch.ts` - Web search integration\n\n**Task Management:**\n- `task.ts` - Subagent creation for complex workflows\n- `todo.ts` - Todo list management and tracking\n\n**Utilities:**\n- `tool.ts` - Base tool interface and definitions\n- `index.ts` - Tool registry and management\n\n### Key Design Patterns\n\n#### 1. **Headless Operation**\n- Designed for non-interactive, automated workflows\n- JSONL protocol for stdin/stdout communication\n- No human intervention required during execution\n\n#### 2. **Provider Abstraction**\n- Multi-provider support via `ai` SDK beta\n- OpenAI, Anthropic, Google providers\n- Configurable through JSONC configuration files\n\n#### 3. **Session Persistence**\n- Optional session directory for state persistence\n- Message history and context retention\n- Working directory management\n\n#### 4. 
**Subagent Architecture**\n- Hierarchical task delegation via `task` tool\n- Context-aware subagents with depth tracking\n- Recursive problem decomposition\n\n### Usage Patterns\n\n#### **CLI Usage:**\n```bash\n# Interactive mode with JSONL protocol\nkiana --config kiana.jsonc\n\n# One-shot execution\nkiana --config kiana.jsonc --prompt \"Fix the bug in auth.ts\"\n\n# Human-readable streaming\nkiana --config kiana.jsonc --human-readable --prompt \"List all TypeScript files\"\n```\n\n#### **Library Usage:**\n```typescript\nimport { createSession, loadConfig } from 'kiana'\n\nconst config = await loadConfig('./kiana.jsonc')\nconst session = await createSession(config)\n\n// Send message and stream results\nsession.onEvent((event) => {\n console.log('Received:', event)\n})\n\nawait session.sendMessage(\"Implement user authentication\")\n```\n\n#### **Streaming Protocol:**\n```json\n// Input (stdin)\n{\"type\": \"message\", \"text\": \"Find all console.log statements\"}\n\n// Output (stdout)\n{\"type\": \"session.created\", \"properties\": {...}}\n{\"type\": \"message.part.updated\", \"properties\": {...}}\n{\"type\": \"tool\", \"properties\": {\"tool\": \"grep\", \"state\": {\"status\": \"running\"}}}\n```\n\n### Unique Features\n\n#### 1. **Human-Readable Mode**\n- Optional colored CLI output for debugging\n- Real-time tool execution visualization\n- Progress tracking with todo lists\n\n#### 2. **Tool Context System**\n- Each tool execution has context (depth, agent type)\n- Visual indentation for subagent hierarchies\n- Error handling with detailed debugging info\n\n#### 3. **Compaction System**\n- Automatic message history compaction\n- Configurable context window management\n- Efficient token usage\n\n#### 4. 
**Validation & Security**\n- Tool input validation with Zod schemas\n- Command timeout handling\n- Safe file system operations\n\n### Configuration System\n- **JSONC format** (JSON with comments)\n- Provider-specific settings\n- Working directory configuration\n- Tool-specific parameters\n\n### Development Philosophy\nKiana embraces **minimalism with power**:\n- **Minimal dependencies**: Only essential packages (`ai`, `zod`, `jsonc-parser`)\n- **Maximum functionality**: Comprehensive toolset for software development\n- **Clean abstractions**: Clear separation between tools, providers, and sessions\n- **Extensibility**: Easy to add new tools and providers\n- **Observability**: Rich event system for monitoring and debugging\n\n### Target Use Cases\n1. **CI/CD Integration**: Automated code review and fixes\n2. **Development Workflows**: Automated refactoring and testing\n3. **Code Analysis**: Large-scale codebase analysis\n4. **Documentation Generation**: Automated API docs\n5. **Research Projects**: AI-powered code exploration\n\nKiana represents a **modern approach to AI-powered development tools** - lightweight, composable, and designed for the era of automated software engineering workflows.", + "time": { + "start": 1765441905656, + "end": 1765441929507 + } + }, + { + "id": "part_mj16jj1h0005", + "sessionID": "session_mj169i5z0000", + "messageID": "message_mj16iygo0002", + "type": "step-finish", + "reason": "stop", + "tokens": { + "input": 0, + "output": 0, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + } +] \ No newline at end of file diff --git a/packages/kiana/tmpx/session.json b/packages/kiana/tmpx/session.json new file mode 100644 index 00000000000..e051aefbdd0 --- /dev/null +++ b/packages/kiana/tmpx/session.json @@ -0,0 +1,11 @@ +{ + "id": "session_mj169i5z0000", + "projectID": "project_mj169i5z0001", + "directory": "/Users/joohwi.lee/telnet2/opencode/packages/kiana", + "title": "Session", + "version": "1.0.0", + 
"time": { + "created": 1765441929511, + "updated": 1765441929511 + } +} \ No newline at end of file diff --git a/packages/kiana/tmpx2/messages.json b/packages/kiana/tmpx2/messages.json new file mode 100644 index 00000000000..1f4f4369574 --- /dev/null +++ b/packages/kiana/tmpx2/messages.json @@ -0,0 +1,156 @@ +[ + { + "info": { + "id": "message_mj16ed5y0002", + "sessionID": "session_mj16ed5y0000", + "role": "user", + "time": { + "created": 1765441688614 + } + }, + "parts": [ + { + "id": "part_mj16ed5y0003", + "sessionID": "session_mj16ed5y0000", + "messageID": "message_mj16ed5y0002", + "type": "text", + "text": "read the first 10 lines of package.json" + } + ] + }, + { + "info": { + "id": "message_mj16ed5z0004", + "sessionID": "session_mj16ed5y0000", + "role": "assistant", + "parentID": "message_mj16ed5y0002", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765441688615, + "completed": 1765441691210 + }, + "cost": 0, + "tokens": { + "input": 5879, + "output": 39, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "tool-calls" + }, + "parts": [ + { + "id": "part_mj16edd90005", + "sessionID": "session_mj16ed5y0000", + "messageID": "message_mj16ed5z0004", + "type": "step-start" + }, + { + "id": "part_mj16ef600006", + "sessionID": "session_mj16ed5y0000", + "messageID": "message_mj16ed5z0004", + "type": "tool", + "tool": "read", + "callID": "read:0", + "state": { + "status": "completed", + "input": { + "filePath": "/Users/joohwi.lee/telnet2/opencode/packages/kiana/package.json", + "limit": 10 + }, + "output": "\n 1\t{\n 2\t \"name\": \"kiana\",\n 3\t \"version\": \"0.1.0\",\n 4\t \"description\": \"Kiana - Minimal headless coding agent\",\n 5\t \"type\": \"module\",\n 6\t \"main\": \"./dist/index.js\",\n 7\t \"types\": \"./dist/index.d.ts\",\n 8\t \"bin\": {\n 9\t \"kiana\": \"./dist/cli.js\"\n 10\t },\n\n(File has more lines. 
Use 'offset' parameter to read beyond line 10)\n", + "title": "package.json", + "metadata": { + "preview": "{\n \"name\": \"kiana\",\n \"version\": \"0.1.0\",\n \"description\": \"Kiana - Minimal headless coding agent\",\n \"type\": \"module\",\n \"main\": \"./dist/index.js\",\n \"types\": \"./dist/index.d.ts\",\n \"bin\": {\n \"kiana\": \"./dist/cli.js\"\n }," + }, + "time": { + "start": 1765441691208, + "end": 1765441691210 + } + } + }, + { + "id": "part_mj16ef620007", + "sessionID": "session_mj16ed5y0000", + "messageID": "message_mj16ed5z0004", + "type": "step-finish", + "reason": "tool-calls", + "tokens": { + "input": 5879, + "output": 39, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + }, + { + "info": { + "id": "message_mj16ef620008", + "sessionID": "session_mj16ed5y0000", + "role": "assistant", + "parentID": "message_mj16ed5y0002", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765441691210, + "completed": 1765441693787 + }, + "cost": 0, + "tokens": { + "input": 6065, + "output": 92, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "stop" + }, + "parts": [ + { + "id": "part_mj16ef900009", + "sessionID": "session_mj16ed5y0000", + "messageID": "message_mj16ef620008", + "type": "step-start" + }, + { + "id": "part_mj16eh5n000a", + "sessionID": "session_mj16ed5y0000", + "messageID": "message_mj16ef620008", + "type": "text", + "text": "Here are the first 10 lines of package.json:\n\n```json\n{\n \"name\": \"kiana\",\n \"version\": \"0.1.0\",\n \"description\": \"Kiana - Minimal headless coding agent\",\n \"type\": \"module\",\n \"main\": \"./dist/index.js\",\n \"types\": \"./dist/index.d.ts\",\n \"bin\": {\n \"kiana\": \"./dist/cli.js\"\n },\n```", + "time": { + "start": 1765441693787, + "end": 1765441693787 + } + }, + { + "id": "part_mj16eh5n000b", + "sessionID": "session_mj16ed5y0000", + "messageID": "message_mj16ef620008", + "type": 
"step-finish", + "reason": "stop", + "tokens": { + "input": 6065, + "output": 92, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + } +] \ No newline at end of file diff --git a/packages/kiana/tmpx2/session.json b/packages/kiana/tmpx2/session.json new file mode 100644 index 00000000000..1daeee91474 --- /dev/null +++ b/packages/kiana/tmpx2/session.json @@ -0,0 +1,11 @@ +{ + "id": "session_mj16ed5y0000", + "projectID": "project_mj16ed5y0001", + "directory": "/Users/joohwi.lee/telnet2/opencode/packages/kiana", + "title": "Session", + "version": "1.0.0", + "time": { + "created": 1765441693787, + "updated": 1765441693787 + } +} \ No newline at end of file diff --git a/packages/kiana/tmpx3/messages.json b/packages/kiana/tmpx3/messages.json new file mode 100644 index 00000000000..010984f1603 --- /dev/null +++ b/packages/kiana/tmpx3/messages.json @@ -0,0 +1,82 @@ +[ + { + "info": { + "id": "message_mj16whvw0002", + "sessionID": "session_mj16whvw0000", + "role": "user", + "time": { + "created": 1765442534540 + } + }, + "parts": [ + { + "id": "part_mj16whvw0003", + "sessionID": "session_mj16whvw0000", + "messageID": "message_mj16whvw0002", + "type": "text", + "text": "say hello" + } + ] + }, + { + "info": { + "id": "message_mj16whvx0004", + "sessionID": "session_mj16whvw0000", + "role": "assistant", + "parentID": "message_mj16whvw0002", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765442534541, + "completed": 1765442536098 + }, + "cost": 0, + "tokens": { + "input": 0, + "output": 0, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "stop" + }, + "parts": [ + { + "id": "part_mj16wi2h0005", + "sessionID": "session_mj16whvw0000", + "messageID": "message_mj16whvx0004", + "type": "step-start" + }, + { + "id": "part_mj16wj0n0006", + "sessionID": "session_mj16whvw0000", + "messageID": "message_mj16whvx0004", + "type": "text", + "text": "Hello!", + "time": { + 
"start": 1765442536007, + "end": 1765442536095 + } + }, + { + "id": "part_mj16wj340007", + "sessionID": "session_mj16whvw0000", + "messageID": "message_mj16whvx0004", + "type": "step-finish", + "reason": "stop", + "tokens": { + "input": 0, + "output": 0, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + } +] \ No newline at end of file diff --git a/packages/kiana/tmpx3/session.json b/packages/kiana/tmpx3/session.json new file mode 100644 index 00000000000..b818a658ff6 --- /dev/null +++ b/packages/kiana/tmpx3/session.json @@ -0,0 +1,11 @@ +{ + "id": "session_mj16whvw0000", + "projectID": "project_mj16whvw0001", + "directory": "/Users/joohwi.lee/telnet2/opencode/packages/kiana", + "title": "Session", + "version": "1.0.0", + "time": { + "created": 1765442536098, + "updated": 1765442536098 + } +} \ No newline at end of file diff --git a/packages/kiana/tmpx4/messages.json b/packages/kiana/tmpx4/messages.json new file mode 100644 index 00000000000..3e0b45ec9ff --- /dev/null +++ b/packages/kiana/tmpx4/messages.json @@ -0,0 +1,82 @@ +[ + { + "info": { + "id": "message_mj16ykao0002", + "sessionID": "session_mj16ykao0000", + "role": "user", + "time": { + "created": 1765442630976 + } + }, + "parts": [ + { + "id": "part_mj16ykao0003", + "sessionID": "session_mj16ykao0000", + "messageID": "message_mj16ykao0002", + "type": "text", + "text": "say hello" + } + ] + }, + { + "info": { + "id": "message_mj16ykap0004", + "sessionID": "session_mj16ykao0000", + "role": "assistant", + "parentID": "message_mj16ykao0002", + "modelID": "ep-20250821060450-4bc6g", + "providerID": "openai-compatible", + "time": { + "created": 1765442630977, + "completed": 1765442632480 + }, + "cost": 0, + "tokens": { + "input": 5921, + "output": 2, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "finish": "stop" + }, + "parts": [ + { + "id": "part_mj16ykgt0005", + "sessionID": "session_mj16ykao0000", + "messageID": "message_mj16ykap0004", + "type": 
"step-start" + }, + { + "id": "part_mj16ylcu0006", + "sessionID": "session_mj16ykao0000", + "messageID": "message_mj16ykap0004", + "type": "text", + "text": "Hello!", + "time": { + "start": 1765442632350, + "end": 1765442632475 + } + }, + { + "id": "part_mj16ylgd0007", + "sessionID": "session_mj16ykao0000", + "messageID": "message_mj16ykap0004", + "type": "step-finish", + "reason": "stop", + "tokens": { + "input": 5921, + "output": 2, + "reasoning": 0, + "cache": { + "read": 0, + "write": 0 + } + }, + "cost": 0 + } + ] + } +] \ No newline at end of file diff --git a/packages/kiana/tmpx4/session.json b/packages/kiana/tmpx4/session.json new file mode 100644 index 00000000000..d3757ea3567 --- /dev/null +++ b/packages/kiana/tmpx4/session.json @@ -0,0 +1,11 @@ +{ + "id": "session_mj16ykao0000", + "projectID": "project_mj16ykao0001", + "directory": "/Users/joohwi.lee/telnet2/opencode/packages/kiana", + "title": "Session", + "version": "1.0.0", + "time": { + "created": 1765442632480, + "updated": 1765442632480 + } +} \ No newline at end of file From f0eee97874e812731651abe6ffc03fe70219f207 Mon Sep 17 00:00:00 2001 From: Joohwi Lee Date: Thu, 11 Dec 2025 02:45:34 -0800 Subject: [PATCH 3/3] feat: improve CLI output formatting for list tool --- bun.lock | 56 +- packages/kiana-v6/kiana.jsonc | 9 + packages/kiana-v6/package.json | 39 + packages/kiana-v6/src/agent.ts | 922 ++++++++++++ packages/kiana-v6/src/cli.ts | 450 ++++++ packages/kiana-v6/src/config.ts | 123 ++ packages/kiana-v6/src/event.ts | 111 ++ packages/kiana-v6/src/index.ts | 106 ++ packages/kiana-v6/src/provider.ts | 58 + packages/kiana-v6/src/stream.ts | 288 ++++ packages/kiana-v6/src/tool/bash.ts | 252 ++++ packages/kiana-v6/src/tool/codesearch.ts | 135 ++ packages/kiana-v6/src/tool/edit.ts | 648 +++++++++ packages/kiana-v6/src/tool/glob.ts | 78 + packages/kiana-v6/src/tool/grep.ts | 113 ++ packages/kiana-v6/src/tool/index.ts | 111 ++ packages/kiana-v6/src/tool/invalid.ts | 21 + 
packages/kiana-v6/src/tool/list.ts | 115 ++ packages/kiana-v6/src/tool/read.ts | 259 ++++ packages/kiana-v6/src/tool/task.ts | 176 +++ packages/kiana-v6/src/tool/todo.ts | 109 ++ packages/kiana-v6/src/tool/tool.ts | 77 + packages/kiana-v6/src/tool/webfetch.ts | 242 ++++ packages/kiana-v6/src/tool/websearch.ts | 145 ++ packages/kiana-v6/src/tool/write.ts | 53 + packages/kiana-v6/src/types/event.ts | 184 +++ packages/kiana-v6/src/types/index.ts | 5 + packages/kiana-v6/src/types/message.ts | 96 ++ packages/kiana-v6/src/types/part.ts | 229 +++ packages/kiana-v6/src/types/session.ts | 57 + packages/kiana-v6/src/util/ripgrep.ts | 220 +++ packages/kiana-v6/test-config.jsonc | 10 + packages/kiana-v6/tsconfig.json | 21 + packages/kiana-vs-kianav6.md | 235 +++ packages/kiana/README.md | 207 +++ packages/kiana/coco-output.json | 1668 ++++++++++++++++++++++ packages/kiana/src/config.ts | 10 +- packages/kiana/src/session.ts | 2 + packages/kiana/src/tool/tool.ts | 16 +- 39 files changed, 7652 insertions(+), 4 deletions(-) create mode 100644 packages/kiana-v6/kiana.jsonc create mode 100644 packages/kiana-v6/package.json create mode 100644 packages/kiana-v6/src/agent.ts create mode 100644 packages/kiana-v6/src/cli.ts create mode 100644 packages/kiana-v6/src/config.ts create mode 100644 packages/kiana-v6/src/event.ts create mode 100644 packages/kiana-v6/src/index.ts create mode 100644 packages/kiana-v6/src/provider.ts create mode 100644 packages/kiana-v6/src/stream.ts create mode 100644 packages/kiana-v6/src/tool/bash.ts create mode 100644 packages/kiana-v6/src/tool/codesearch.ts create mode 100644 packages/kiana-v6/src/tool/edit.ts create mode 100644 packages/kiana-v6/src/tool/glob.ts create mode 100644 packages/kiana-v6/src/tool/grep.ts create mode 100644 packages/kiana-v6/src/tool/index.ts create mode 100644 packages/kiana-v6/src/tool/invalid.ts create mode 100644 packages/kiana-v6/src/tool/list.ts create mode 100644 packages/kiana-v6/src/tool/read.ts create mode 100644 
packages/kiana-v6/src/tool/task.ts create mode 100644 packages/kiana-v6/src/tool/todo.ts create mode 100644 packages/kiana-v6/src/tool/tool.ts create mode 100644 packages/kiana-v6/src/tool/webfetch.ts create mode 100644 packages/kiana-v6/src/tool/websearch.ts create mode 100644 packages/kiana-v6/src/tool/write.ts create mode 100644 packages/kiana-v6/src/types/event.ts create mode 100644 packages/kiana-v6/src/types/index.ts create mode 100644 packages/kiana-v6/src/types/message.ts create mode 100644 packages/kiana-v6/src/types/part.ts create mode 100644 packages/kiana-v6/src/types/session.ts create mode 100644 packages/kiana-v6/src/util/ripgrep.ts create mode 100644 packages/kiana-v6/test-config.jsonc create mode 100644 packages/kiana-v6/tsconfig.json create mode 100644 packages/kiana-vs-kianav6.md create mode 100644 packages/kiana/README.md create mode 100644 packages/kiana/coco-output.json diff --git a/bun.lock b/bun.lock index 2b785d75325..d6f429fcbd0 100644 --- a/bun.lock +++ b/bun.lock @@ -228,6 +228,26 @@ "typescript": "latest", }, }, + "packages/kiana-v6": { + "name": "kiana-v6", + "version": "0.1.0", + "bin": { + "kiana-v6": "./dist/cli.js", + }, + "dependencies": { + "@ai-sdk/anthropic": "beta", + "@ai-sdk/google": "beta", + "@ai-sdk/openai": "beta", + "@ai-sdk/openai-compatible": "beta", + "ai": "beta", + "jsonc-parser": "latest", + "zod": "latest", + }, + "devDependencies": { + "@types/node": "latest", + "typescript": "latest", + }, + }, "packages/memsh-cli": { "name": "@opencode-ai/memsh-cli", "version": "0.1.0", @@ -2771,6 +2791,8 @@ "kiana": ["kiana@workspace:packages/kiana"], + "kiana-v6": ["kiana-v6@workspace:packages/kiana-v6"], + "kind-of": ["kind-of@6.0.3", "", {}, "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw=="], "kleur": ["kleur@4.1.5", "", {}, "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ=="], @@ -4277,12 +4299,24 @@ 
"kiana/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@2.0.0-beta.47", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-S4VbovR3WjWHF5tmNz6KQYnpP3+VSBrsLd+5uewlpv/7dr4BpQuunpMNr/D4G3GogbVRDfnmYPSJ8OKxBb7Veg=="], - "kiana/ai": ["ai@6.0.0-beta.144", "", { "dependencies": { "@ai-sdk/gateway": "2.0.0-beta.75", "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-6+RP0ZCmfYp0omEafUZXj4c466dVR5IpO1TSr4zCwno6Lir+M/SQmD4Vioz3uayC6TWJZWcI04mflGEUcMfb6g=="], + "kiana/ai": ["ai@6.0.0-beta.145", "", { "dependencies": { "@ai-sdk/gateway": "2.0.0-beta.76", "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-hdWnYw2PBA+n/K321o2m1cYzUpgBNZq9zwZ78FcP8JCA4FjlJEAWZz7QLF/3rn5TS45c2GRFn4tFdV17O5t1DQ=="], "kiana/typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], "kiana/zod": ["zod@4.1.13", "", {}, "sha512-AvvthqfqrAhNH9dnfmrfKzX5upOdjUVJYFqNSlkmGf64gRaTzlPwz99IHYnVs28qYAybvAlBV+H7pn0saFY4Ig=="], + "kiana-v6/@ai-sdk/anthropic": ["@ai-sdk/anthropic@3.0.0-beta.81", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ItrE4+G6CzNs41s5E+Velx/HZng9AmecZ1OkLZxF+WUYvvk31nrjBUZgsggLuxJX91RRFqxtgLz8YrR70g7oNw=="], + + "kiana-v6/@ai-sdk/openai": ["@ai-sdk/openai@3.0.0-beta.90", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, 
"sha512-5q0PKRShOPtkkRjaqUrMEUl0FXhFak/e4ldnZxoJ53BMhNNe0/MbTXuqztZBNLCiG1jSLAZRZfqY/Wj75cNIjQ=="], + + "kiana-v6/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@2.0.0-beta.47", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-S4VbovR3WjWHF5tmNz6KQYnpP3+VSBrsLd+5uewlpv/7dr4BpQuunpMNr/D4G3GogbVRDfnmYPSJ8OKxBb7Veg=="], + + "kiana-v6/ai": ["ai@6.0.0-beta.145", "", { "dependencies": { "@ai-sdk/gateway": "2.0.0-beta.76", "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-hdWnYw2PBA+n/K321o2m1cYzUpgBNZq9zwZ78FcP8JCA4FjlJEAWZz7QLF/3rn5TS45c2GRFn4tFdV17O5t1DQ=="], + + "kiana-v6/typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], + + "kiana-v6/zod": ["zod@4.1.13", "", {}, "sha512-AvvthqfqrAhNH9dnfmrfKzX5upOdjUVJYFqNSlkmGf64gRaTzlPwz99IHYnVs28qYAybvAlBV+H7pn0saFY4Ig=="], + "lazystream/readable-stream": ["readable-stream@2.3.8", "", { "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA=="], "md-to-react-email/marked": ["marked@7.0.4", "", { "bin": { "marked": "bin/marked.js" } }, "sha512-t8eP0dXRJMtMvBojtkcsA7n48BkauktUKzfkPSCq85ZMTJ0v76Rke4DYz01omYpPTUh4p/f7HePgRo3ebG8+QQ=="], @@ -4891,6 +4925,24 @@ "jsonwebtoken/jws/jwa": ["jwa@1.4.2", "", { "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, 
"sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw=="], + "kiana-v6/@ai-sdk/anthropic/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], + + "kiana-v6/@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], + + "kiana-v6/@ai-sdk/openai/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], + + "kiana-v6/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], + + "kiana-v6/@ai-sdk/openai-compatible/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], + + "kiana-v6/@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { 
"dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], + + "kiana-v6/ai/@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.0-beta.76", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46", "@vercel/oidc": "3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-H0vdg0rG1IHtohsmFgRFXLFfCNL8McjMhjy8Ul2DY++o3/znNt9eJXhjDfiS+MxhomED4EA0Zn3W24iiKRFpqg=="], + + "kiana-v6/ai/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], + + "kiana-v6/ai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], + "kiana/@ai-sdk/anthropic/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], "kiana/@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { 
"@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], @@ -4903,7 +4955,7 @@ "kiana/@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.0-beta.46", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "@valibot/to-json-schema": "^1.3.0", "arktype": "^2.1.22", "effect": "^3.18.4", "zod": "^3.25.76 || ^4.1.8" }, "optionalPeers": ["@valibot/to-json-schema", "arktype", "effect"] }, "sha512-kVCohWGiqR3SkLNMSwW3vOVFUC/g75+3/4me6u1TH4Zv7/Y6kPIU/3wI9OyrsTx3mqsd2SdG6gKzz1AMHwoupQ=="], - "kiana/ai/@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.0-beta.75", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46", "@vercel/oidc": "3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-hxfPJLt80SHEuSmhlcbtgextRSH7aHAySWaN2CVuPASfqidigTEsOt+4t0/9Xqf7v3I5lHqlgMEbqfhzc7eGOQ=="], + "kiana/ai/@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.0-beta.76", "", { "dependencies": { "@ai-sdk/provider": "3.0.0-beta.26", "@ai-sdk/provider-utils": "4.0.0-beta.46", "@vercel/oidc": "3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-H0vdg0rG1IHtohsmFgRFXLFfCNL8McjMhjy8Ul2DY++o3/znNt9eJXhjDfiS+MxhomED4EA0Zn3W24iiKRFpqg=="], "kiana/ai/@ai-sdk/provider": ["@ai-sdk/provider@3.0.0-beta.26", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-UQyOlrpahFL1CZ/QA0ZpFhAkE32fw1XXBx+6gu23YWSCMJCjaf/fiJUPV7xUhp/nXqVO/IC+PIIfLomx55D16A=="], diff --git a/packages/kiana-v6/kiana.jsonc b/packages/kiana-v6/kiana.jsonc new file mode 100644 index 00000000000..101fe8b5b66 --- /dev/null +++ b/packages/kiana-v6/kiana.jsonc @@ -0,0 +1,9 @@ +{ + "provider": { + "type": "openai-compatible", + 
"apiKey": "23ed9d5c-c634-4cc1-9e70-9c9ac63a17ef", + "model": "ep-20250821060450-4bc6g", + "baseUrl": "https://ark-ap-southeast.byteintl.net/api/v3" + }, + "streaming": true +} diff --git a/packages/kiana-v6/package.json b/packages/kiana-v6/package.json new file mode 100644 index 00000000000..eb0040f1463 --- /dev/null +++ b/packages/kiana-v6/package.json @@ -0,0 +1,39 @@ +{ + "name": "kiana-v6", + "version": "0.1.0", + "description": "Kiana v6 - Headless coding agent using Vercel AI SDK v6 ToolLoopAgent", + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "bin": { + "kiana-v6": "./dist/cli.js" + }, + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js" + } + }, + "scripts": { + "build": "tsc", + "dev": "tsc --watch", + "typecheck": "tsc --noEmit" + }, + "dependencies": { + "ai": "beta", + "@ai-sdk/anthropic": "beta", + "@ai-sdk/openai": "beta", + "@ai-sdk/openai-compatible": "beta", + "@ai-sdk/google": "beta", + "zod": "latest", + "jsonc-parser": "latest" + }, + "devDependencies": { + "typescript": "latest", + "@types/node": "latest" + }, + "engines": { + "node": ">=20" + }, + "license": "MIT" +} diff --git a/packages/kiana-v6/src/agent.ts b/packages/kiana-v6/src/agent.ts new file mode 100644 index 00000000000..281e8bc6a75 --- /dev/null +++ b/packages/kiana-v6/src/agent.ts @@ -0,0 +1,922 @@ +import { ToolLoopAgent, tool, jsonSchema, stepCountIs } from "ai" +import type { LanguageModel } from "ai" +import { z } from "zod" +import { spawn } from "node:child_process" +import * as fs from "node:fs" +import * as path from "node:path" +import type { Tool, ToolContext, ToolResult } from "./tool/tool.js" +import { ALL_TOOLS, DEFAULT_TOOLS, setSubagentExecutor } from "./tool/index.js" +import type { SessionInfo } from "./types/session.js" +import type { MessageInfo } from "./types/message.js" +import type { + Part, + TextPart, + ToolPart, +} from "./types/part.js" +import type { + StreamPart, + StartPart, + 
TextStartPart, + TextDeltaPart, + TextEndPart, + ToolInputAvailablePart, + ToolOutputAvailablePart, + ToolOutputErrorPart, + StartStepPart, + FinishStepPart, + FinishPart, + DataSessionPart, + DataSessionIdlePart, + DataTodoPart, + TokenUsage, +} from "./stream.js" + +// Re-export stream types +export type { StreamPart } from "./stream.js" +export { formatSSE, parseSSE, createSSEHeaders } from "./stream.js" + +// ID generation +let idCounter = 0 +function generateId(prefix: string): string { + const timestamp = Date.now().toString(36) + const counter = (idCounter++).toString(36).padStart(4, "0") + return `${prefix}_${timestamp}${counter}` +} + +// Storage for session persistence +interface SessionStorage { + sessionDir: string + save(session: SessionInfo, messages: MessageWithParts[]): void + load(): { session: SessionInfo; messages: MessageWithParts[] } | null +} + +function createSessionStorage(sessionDir: string): SessionStorage { + const sessionFile = path.join(sessionDir, "session.json") + const messagesFile = path.join(sessionDir, "messages.json") + + return { + sessionDir, + save(session: SessionInfo, messages: MessageWithParts[]) { + fs.mkdirSync(sessionDir, { recursive: true }) + fs.writeFileSync(sessionFile, JSON.stringify(session, null, 2)) + fs.writeFileSync(messagesFile, JSON.stringify(messages, null, 2)) + }, + load() { + if (!fs.existsSync(sessionFile) || !fs.existsSync(messagesFile)) { + return null + } + try { + const session = JSON.parse(fs.readFileSync(sessionFile, "utf-8")) as SessionInfo + const messages = JSON.parse(fs.readFileSync(messagesFile, "utf-8")) as MessageWithParts[] + return { session, messages } + } catch { + return null + } + }, + } +} + +interface MessageWithParts { + info: MessageInfo + parts: Part[] +} + +export interface CodingAgentConfig { + /** The language model to use */ + model: LanguageModel + /** Working directory for file operations */ + workingDirectory?: string + /** System instructions for the agent */ + 
instructions?: string + /** Subset of tools to enable (defaults to all) */ + tools?: string[] + /** Maximum steps before stopping (default: 50) */ + maxSteps?: number + /** Maximum retries for API calls (default: 3) */ + maxRetries?: number + /** Session directory for persistence */ + sessionDir?: string +} + +export interface GenerateParams { + /** The prompt/message to send */ + prompt: string + /** Abort signal for cancellation */ + abortSignal?: AbortSignal +} + +export interface StreamParams extends GenerateParams {} + +/** Callback for receiving stream parts */ +export type StreamCallback = (part: StreamPart) => void + +export class CodingAgent { + readonly id: string + private messages: MessageWithParts[] + private tools: Record + private workingDirectory: string + private storage: SessionStorage | null + private config: CodingAgentConfig + private abortController: AbortController + private streamCallbacks: Set = new Set() + + constructor(config: CodingAgentConfig) { + this.config = config + this.workingDirectory = config.workingDirectory || process.cwd() + this.storage = config.sessionDir ? createSessionStorage(config.sessionDir) : null + this.abortController = new AbortController() + + // Load existing session or create new + const existing = this.storage?.load() + this.id = existing?.session.id ?? generateId("session") + this.messages = existing?.messages ?? [] + + // Get the subset of tools + const toolNames = config.tools ?? 
DEFAULT_TOOLS + this.tools = {} + for (const name of toolNames) { + if (ALL_TOOLS[name]) { + this.tools[name] = ALL_TOOLS[name] + } + } + + // Setup subagent executor for Task tool + this.setupSubagentExecutor() + } + + private emit(part: StreamPart): void { + for (const cb of this.streamCallbacks) { + try { + cb(part) + } catch (err) { + console.error("Error in stream callback:", err) + } + } + } + + private setupSubagentExecutor(): void { + setSubagentExecutor(async (params) => { + // Create a subagent with the specified agent config + const subAgent = new CodingAgent({ + ...this.config, + instructions: params.agentConfig.systemPrompt, + // Disable task tool in subagents to prevent infinite recursion + tools: (this.config.tools ?? DEFAULT_TOOLS).filter((t) => !["task", "todowrite", "todoread"].includes(t)), + }) + + // Forward stream parts from subagent with context + subAgent.onStream((part) => { + // Add subagent context via data part + this.emit({ + type: "data-subagent-context", + data: { + parentSessionID: params.parentSessionID, + depth: params.depth, + agentType: params.agentType, + }, + }) + this.emit(part) + }) + + // Generate response + const result = await subAgent.generate({ prompt: params.prompt }) + + return { + output: result.text, + sessionID: subAgent.id, + } + }) + } + + /** + * Subscribe to stream parts + */ + onStream(callback: StreamCallback): () => void { + this.streamCallbacks.add(callback) + return () => this.streamCallbacks.delete(callback) + } + + /** + * Generate a response (non-streaming) - still emits stream parts for compatibility + */ + async generate(params: GenerateParams): Promise<{ text: string; usage?: any }> { + const { agent, callParams, messageId } = await this.buildAgent(params) + + // Emit session info + this.emitSessionStart(messageId) + + const result = await agent.generate({ + ...callParams, + abortSignal: params.abortSignal ?? 
this.abortController.signal, + }) + + // Emit finish + this.emit({ + type: "finish", + finishReason: "stop", + usage: this.convertUsage(result.usage), + }) + + // Emit session idle + this.emit({ + type: "data-session-idle", + data: { sessionID: this.id }, + }) + + this.saveSession() + + return { + text: result.text, + usage: result.usage, + } + } + + /** + * Stream a response - returns async iterable that emits events via onStream callback + */ + async stream(params: StreamParams): Promise<{ + textStream: AsyncIterable + text: Promise + }> { + const { agent, callParams, messageId, assistantParts } = await this.buildAgent(params) + + // Emit session info + this.emitSessionStart(messageId) + + const result = await agent.stream({ + ...callParams, + abortSignal: params.abortSignal ?? this.abortController.signal, + }) + + const self = this + let textPartId: string | null = null + + // Wrap text stream to emit events AND yield text + const textStream = (async function* (): AsyncGenerator { + for await (const chunk of result.textStream) { + // Emit text-start on first chunk + if (!textPartId) { + textPartId = generateId("text") + self.emit({ type: "text-start", id: textPartId }) + } + + // Emit text-delta + self.emit({ type: "text-delta", id: textPartId, delta: chunk }) + + yield chunk + } + + // Emit text-end if we started + if (textPartId) { + self.emit({ type: "text-end", id: textPartId }) + } + })() + + // When streaming completes, emit finish events + const textPromise = result.text.then((text) => { + self.emit({ + type: "finish", + finishReason: "stop", + }) + + self.emit({ + type: "data-session-idle", + data: { sessionID: self.id }, + }) + + self.saveSession() + return text + }) + + return { + textStream, + text: textPromise, + } + } + + /** + * Abort the current generation + */ + abort(): void { + this.abortController.abort() + this.abortController = new AbortController() + + this.emit({ + type: "data-session-idle", + data: { sessionID: this.id }, + }) + } + + /** 
+ * Get list of available tools + */ + getTools(): { name: string; description: string }[] { + return Object.entries(this.tools).map(([name, tool]) => ({ + name, + description: tool.description, + })) + } + + private emitSessionStart(messageId: string): void { + const sessionInfo: SessionInfo = { + id: this.id, + projectID: generateId("project"), + directory: this.workingDirectory, + title: "Session", + version: "1.0.0", + time: { + created: Date.now(), + updated: Date.now(), + }, + } + + // Emit session data + this.emit({ + type: "data-session", + data: sessionInfo, + }) + + // Emit message start + this.emit({ + type: "start", + messageId, + metadata: { sessionID: this.id }, + }) + } + + private async buildAgent(params: GenerateParams): Promise<{ + agent: ToolLoopAgent, never> + callParams: { prompt: string } | { messages: any[] } + messageId: string + assistantParts: Part[] + }> { + // Build system prompt with environment context + const baseInstructions = this.config.instructions || getDefaultSystemPrompt() + const envContext = await buildEnvironmentContext(this.workingDirectory) + const instructions = baseInstructions + "\n" + envContext + + // Create message for tracking + const userMessageID = generateId("message") + const userMessage: MessageInfo = { + id: userMessageID, + sessionID: this.id, + role: "user", + time: { created: Date.now() }, + } + + const userTextPart: TextPart = { + id: generateId("part"), + sessionID: this.id, + messageID: userMessageID, + type: "text", + text: params.prompt, + } + + this.messages.push({ + info: userMessage, + parts: [userTextPart], + }) + + // Create assistant message for tracking parts + const assistantMessageID = generateId("message") + const assistantMessage: MessageInfo = { + id: assistantMessageID, + sessionID: this.id, + role: "assistant", + parentID: userMessageID, + time: { created: Date.now() }, + cost: 0, + tokens: { + input: 0, + output: 0, + reasoning: 0, + cache: { read: 0, write: 0 }, + }, + } + + const 
assistantParts: Part[] = [] + this.messages.push({ info: assistantMessage, parts: assistantParts }) + + // Build v6 tools + const aiTools = this.buildAITools(assistantMessageID, assistantParts) + + // Build call params - use messages if we have history, otherwise just prompt + const historyMessages = this.buildAIMessages() + // Remove the last user message since that's the current prompt + const previousMessages = historyMessages.slice(0, -1) + + // If we have previous messages, add current prompt to them + // Otherwise just use the prompt directly + const callParams: { prompt: string } | { messages: any[] } = previousMessages.length > 0 + ? { messages: [...previousMessages, { role: "user", content: params.prompt }] } + : { prompt: params.prompt } + + const self = this + + const agent = new ToolLoopAgent({ + model: this.config.model, + instructions, + tools: aiTools, + stopWhen: stepCountIs(this.config.maxSteps ?? 50), + maxRetries: this.config.maxRetries ?? 3, + onStepFinish: (step) => { + // Emit step finish + const finishStepPart: FinishStepPart = { + type: "finish-step", + usage: self.convertUsage(step.usage), + } + self.emit(finishStepPart) + + // Update message token counts + self.updateTokensFromUsage(assistantMessage, step.usage) + }, + onFinish: (event) => { + // Update final token counts + self.updateTokensFromUsage(assistantMessage, event.totalUsage) + assistantMessage.time.completed = Date.now() + }, + }) + + return { agent, callParams, messageId: assistantMessageID, assistantParts } + } + + private buildAITools(messageID: string, parts: Part[]): Record { + const aiTools: Record = {} + const self = this + + for (const [name, toolDef] of Object.entries(this.tools)) { + const schema = z.toJSONSchema(toolDef.parameters) + + aiTools[name] = tool({ + description: toolDef.description, + inputSchema: jsonSchema(schema as any), + // Called when tool input is fully available - emit tool-input-available + onInputAvailable: async ({ input, toolCallId }: { input: 
any; toolCallId: string }) => { + // Emit start-step for tool execution + self.emit({ type: "start-step" }) + + // Emit tool-input-available (AI SDK format) + const toolInputPart: ToolInputAvailablePart = { + type: "tool-input-available", + toolCallId, + toolName: name, + input: input as Record, + } + self.emit(toolInputPart) + + // Also track internally + const toolPart: ToolPart = { + id: generateId("part"), + sessionID: self.id, + messageID, + type: "tool", + tool: name, + callID: toolCallId, + state: { + status: "running", + input: input as Record, + time: { start: Date.now() }, + }, + } + parts.push(toolPart) + }, + execute: async (args: any, options: { abortSignal?: AbortSignal; toolCallId: string }) => { + const ctx: ToolContext = { + sessionID: self.id, + messageID, + workingDirectory: self.workingDirectory, + abort: options.abortSignal ?? new AbortController().signal, + metadata: (val) => { + // Find and update the tool part + const toolPart = parts.find( + (p): p is ToolPart => p.type === "tool" && p.callID === options.toolCallId + ) + if (toolPart && toolPart.state.status === "running") { + toolPart.state = { + ...toolPart.state, + title: val.title, + metadata: val.metadata, + } + } + }, + } + + try { + const result = await toolDef.execute(args, ctx) + + // Emit tool-output-available (AI SDK format) + const toolOutputPart: ToolOutputAvailablePart = { + type: "tool-output-available", + toolCallId: options.toolCallId, + output: result.output, + metadata: { + title: result.title, + ...result.metadata, + }, + } + self.emit(toolOutputPart) + + // Update internal tracking + const toolPart = parts.find( + (p): p is ToolPart => p.type === "tool" && p.callID === options.toolCallId + ) + if (toolPart) { + toolPart.state = { + status: "completed", + input: toolPart.state.input, + output: result.output, + title: result.title, + metadata: result.metadata, + time: { + start: (toolPart.state as any).time?.start ?? 
Date.now(), + end: Date.now(), + }, + } + } + + return result + } catch (error) { + const errorMsg = error instanceof Error ? error.message : String(error) + + // Emit tool-output-error (AI SDK format) + const toolErrorPart: ToolOutputErrorPart = { + type: "tool-output-error", + toolCallId: options.toolCallId, + error: errorMsg, + } + self.emit(toolErrorPart) + + // Update internal tracking + const toolPart = parts.find( + (p): p is ToolPart => p.type === "tool" && p.callID === options.toolCallId + ) + if (toolPart) { + toolPart.state = { + status: "error", + input: toolPart.state.input, + error: errorMsg, + time: { + start: (toolPart.state as any).time?.start ?? Date.now(), + end: Date.now(), + }, + } + } + throw error + } + }, + toModelOutput: (result: ToolResult) => ({ + type: "text", + value: result.output, + }), + }) + } + + return aiTools + } + + private buildAIMessages(): any[] { + const aiMessages: any[] = [] + + let i = 0 + while (i < this.messages.length) { + const msg = this.messages[i] + + if (msg.info.role === "user") { + const textParts = msg.parts.filter((p): p is TextPart => p.type === "text") + if (textParts.length > 0) { + aiMessages.push({ + role: "user", + content: textParts.map((p) => p.text).join("\n"), + }) + } + i++ + } else if (msg.info.role === "assistant") { + // Collect all parts from consecutive assistant messages + const allTextParts: TextPart[] = [] + const allToolParts: ToolPart[] = [] + + while (i < this.messages.length && this.messages[i].info.role === "assistant") { + const assistantMsg = this.messages[i] + const textParts = assistantMsg.parts.filter( + (p): p is TextPart => p.type === "text" && !!p.text + ) + const toolParts = assistantMsg.parts.filter( + (p): p is ToolPart => + p.type === "tool" && (p.state.status === "completed" || p.state.status === "error") + ) + allTextParts.push(...textParts) + allToolParts.push(...toolParts) + i++ + } + + if (allToolParts.length > 0) { + // Build assistant message with text and tool calls 
+ const content: any[] = [] + + for (const textPart of allTextParts) { + content.push({ type: "text", text: textPart.text }) + } + + for (const toolPart of allToolParts) { + content.push({ + type: "tool-call", + toolCallId: toolPart.callID, + toolName: toolPart.tool, + input: JSON.stringify(toolPart.state.input), + }) + } + + if (content.length > 0) { + aiMessages.push({ + role: "assistant", + content, + }) + } + + // Add tool results + const toolResultContent: any[] = [] + for (const toolPart of allToolParts) { + const isError = toolPart.state.status === "error" + const outputValue = isError + ? `Error: ${(toolPart.state as any).error}` + : (toolPart.state as any).output ?? "" + + toolResultContent.push({ + type: "tool-result", + toolCallId: toolPart.callID, + toolName: toolPart.tool, + output: { + type: isError ? "error-text" : "text", + value: String(outputValue), + }, + }) + } + + if (toolResultContent.length > 0) { + aiMessages.push({ + role: "tool", + content: toolResultContent, + }) + } + } else if (allTextParts.length > 0) { + aiMessages.push({ + role: "assistant", + content: allTextParts.map((p) => p.text).join("\n"), + }) + } + } else { + i++ + } + } + + return aiMessages + } + + private convertUsage(usage: any): TokenUsage | undefined { + if (!usage) return undefined + + let inputTotal = 0 + let outputTotal = 0 + let cachedRead = 0 + let reasoning = 0 + + if (typeof usage.inputTokens === "object" && usage.inputTokens !== null) { + inputTotal = usage.inputTokens.total ?? 0 + cachedRead = usage.inputTokens.cacheRead ?? 0 + } else { + inputTotal = usage.inputTokens ?? usage.promptTokens ?? 0 + cachedRead = usage.cachedInputTokens ?? 0 + } + + if (typeof usage.outputTokens === "object" && usage.outputTokens !== null) { + outputTotal = usage.outputTokens.total ?? 0 + reasoning = usage.outputTokens.reasoning ?? 0 + } else { + outputTotal = usage.outputTokens ?? usage.completionTokens ?? 0 + reasoning = usage.reasoningTokens ?? 
0 + } + + return { + inputTokens: inputTotal, + outputTokens: outputTotal, + reasoningTokens: reasoning, + cacheReadTokens: cachedRead, + } + } + + private updateTokensFromUsage(message: MessageInfo, usage: any): void { + if (!usage) return + + const converted = this.convertUsage(usage) + if (!converted) return + + ;(message as any).tokens = { + input: (converted.inputTokens ?? 0) - (converted.cacheReadTokens ?? 0), + output: converted.outputTokens ?? 0, + reasoning: converted.reasoningTokens ?? 0, + cache: { + read: converted.cacheReadTokens ?? 0, + write: 0, + }, + } + } + + private saveSession(): void { + if (this.storage) { + const sessionInfo: SessionInfo = { + id: this.id, + projectID: generateId("project"), + directory: this.workingDirectory, + title: "Session", + version: "1.0.0", + time: { + created: Date.now(), + updated: Date.now(), + }, + } + this.storage.save(sessionInfo, this.messages) + } + } +} + +function getDefaultSystemPrompt(): string { + return `You are Kiana, a powerful coding agent running in headless mode. + +You help users with software engineering tasks using the tools available to you. You are running non-interactively, so you cannot ask clarifying questions - make reasonable assumptions and proceed. + +IMPORTANT: You must NEVER generate or guess URLs unless you are confident they are for programming help. + +# Tone and style +- No emojis unless explicitly requested. +- Responses should be short and concise. +- Use Github-flavored markdown for formatting. +- Output text to communicate; use tools only to complete tasks. +- NEVER create files unless absolutely necessary. Prefer editing existing files. + +# Professional objectivity +Prioritize technical accuracy over validation. Focus on facts and problem-solving with direct, objective technical info. 
+ +# Non-interactive mode +Since you are running headless without user interaction: +- Do not ask for clarification - make reasonable assumptions +- Complete tasks autonomously +- Report progress and results via tool outputs + +# Tool usage +- Call multiple tools in parallel when independent. +- Use specialized tools (Read, Write, Edit, Glob, Grep) instead of bash equivalents. + +# Code References +When referencing code, include \`file_path:line_number\` for navigation.` +} + +/** + * Build environment context for the system prompt. + */ +async function buildEnvironmentContext(workingDirectory: string): Promise { + const isGitRepo = await checkIsGitRepo(workingDirectory) + const gitBranch = isGitRepo ? await getGitBranch(workingDirectory) : null + const gitStatus = isGitRepo ? await getGitStatus(workingDirectory) : null + const fileTree = await getFileTree(workingDirectory, 200) + + const lines = [ + `Here is useful information about the environment you are running in:`, + ``, + `Working directory: ${workingDirectory}`, + `Is directory a git repo: ${isGitRepo ? 
"Yes" : "No"}`, + ] + + if (gitBranch) { + lines.push(`Current branch: ${gitBranch}`) + } + + lines.push(`Platform: ${process.platform}`) + lines.push(`Today's date: ${new Date().toISOString().split("T")[0]}`) + lines.push(``) + + if (gitStatus) { + lines.push(``) + lines.push(``) + lines.push(gitStatus) + lines.push(``) + } + + if (fileTree) { + lines.push(``) + lines.push(``) + lines.push(fileTree) + lines.push(``) + } + + return lines.join("\n") +} + +async function checkIsGitRepo(cwd: string): Promise { + return new Promise((resolve) => { + const proc = spawn("git", ["rev-parse", "--is-inside-work-tree"], { + cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + proc.on("close", (code) => resolve(code === 0)) + proc.on("error", () => resolve(false)) + }) +} + +async function getGitBranch(cwd: string): Promise { + return new Promise((resolve) => { + const proc = spawn("git", ["branch", "--show-current"], { + cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + let output = "" + proc.stdout.on("data", (data) => (output += data.toString())) + proc.on("close", (code) => { + if (code === 0 && output.trim()) { + resolve(output.trim()) + } else { + resolve(null) + } + }) + proc.on("error", () => resolve(null)) + }) +} + +async function getGitStatus(cwd: string): Promise { + return new Promise((resolve) => { + const proc = spawn("git", ["status", "--short"], { + cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + let output = "" + proc.stdout.on("data", (data) => (output += data.toString())) + proc.on("close", (code) => { + if (code === 0 && output.trim()) { + const lines = output.trim().split("\n").slice(0, 50) + if (output.trim().split("\n").length > 50) { + lines.push("... 
(truncated)") + } + resolve(lines.join("\n")) + } else { + resolve(null) + } + }) + proc.on("error", () => resolve(null)) + }) +} + +async function getFileTree(cwd: string, limit: number): Promise { + // Try ripgrep first + const rgResult = await runCommand("rg", ["--files", "--sort", "path"], cwd, limit) + if (rgResult) return rgResult + + // Fallback to git ls-files + const gitResult = await runCommand("git", ["ls-files"], cwd, limit) + if (gitResult) return gitResult + + // Last resort: find + const findResult = await runCommand( + "find", + [".", "-type", "f", "-not", "-path", "*/.*", "-not", "-path", "*/node_modules/*"], + cwd, + limit + ) + return findResult +} + +async function runCommand( + cmd: string, + args: string[], + cwd: string, + lineLimit: number +): Promise { + return new Promise((resolve) => { + const proc = spawn(cmd, args, { + cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + let output = "" + let lineCount = 0 + proc.stdout.on("data", (data) => { + const text = data.toString() + const lines = text.split("\n") + for (const line of lines) { + if (lineCount >= lineLimit) { + proc.kill() + return + } + if (line) { + output += line + "\n" + lineCount++ + } + } + }) + proc.on("close", () => { + if (output.trim()) { + resolve(output.trim()) + } else { + resolve(null) + } + }) + proc.on("error", () => resolve(null)) + }) +} diff --git a/packages/kiana-v6/src/cli.ts b/packages/kiana-v6/src/cli.ts new file mode 100644 index 00000000000..bcfb903f1dd --- /dev/null +++ b/packages/kiana-v6/src/cli.ts @@ -0,0 +1,450 @@ +#!/usr/bin/env node +/** + * Kiana v6 CLI - AI SDK UI Stream Protocol Edition + * + * Usage: + * kiana-v6 [options] + * + * Options: + * --config, -c Path to config file (default: ./kiana.jsonc) + * --prompt, -p Send a single prompt and exit + * --session, -s Session directory for persistence + * --create-config Generate a template config file + * -H Human-readable output (instead of SSE) + * -v, --verbose Show verbose output (tool 
inputs/outputs) + * --help, -h Show help + */ + +import * as fs from "node:fs" +import * as readline from "node:readline" +import { loadConfig, writeConfigTemplate } from "./config.js" +import { createLanguageModel } from "./provider.js" +import { CodingAgent, formatSSE, type StreamPart } from "./agent.js" + +// Parse command line arguments +interface Args { + config?: string + prompt?: string + session?: string + createConfig?: boolean + humanReadable: boolean + verbose: boolean + help: boolean +} + +function parseArgs(argv: string[]): Args { + const args: Args = { + humanReadable: false, + verbose: false, + help: false, + } + + for (let i = 2; i < argv.length; i++) { + const arg = argv[i] + switch (arg) { + case "--config": + case "-c": + args.config = argv[++i] + break + case "--prompt": + case "-p": + args.prompt = argv[++i] + break + case "--session": + case "-s": + args.session = argv[++i] + break + case "--create-config": + args.createConfig = true + break + case "-H": + args.humanReadable = true + break + case "-v": + case "--verbose": + args.verbose = true + break + case "--help": + case "-h": + args.help = true + break + } + } + + return args +} + +function printHelp(): void { + console.log(` +Kiana v6 - Minimal Headless Coding Agent (AI SDK UI Stream Protocol) + +Usage: + kiana-v6 [options] + +Options: + --config, -c Path to config file (default: ./kiana.jsonc) + --prompt, -p Send a single prompt and exit + --session, -s Session directory for persistence + --create-config Generate a template config file + -H Human-readable output (instead of SSE) + -v, --verbose Show verbose output (tool inputs/outputs) + --help, -h Show help + +Examples: + # Run with default config (./kiana.jsonc) + kiana-v6 -H -p "List all TypeScript files" + + # Run with a specific config + kiana-v6 -c config.jsonc -p "List all TypeScript files" + + # Human-readable streaming output + kiana-v6 -H -p "Analyze this codebase" + + # Interactive mode (read JSON from stdin) + kiana-v6 + + # 
Generate config template + kiana-v6 --create-config > kiana.jsonc +`) +} + +// ANSI color codes for human-readable mode +const colors = { + reset: "\x1b[0m", + dim: "\x1b[2m", + cyan: "\x1b[36m", + green: "\x1b[32m", + red: "\x1b[31m", + yellow: "\x1b[33m", + gray: "\x1b[90m", +} + +// Track if we're mid-line (text streaming without newline) +let needsNewlineBeforeBlock = false + +/** + * Format a unified diff with colors for terminal display. + * - Red for removed lines (-) + * - Green for added lines (+) + * - Cyan for hunk headers (@@) + * - Dim for context lines + */ +function formatDiff(diff: string): string { + return diff + .split("\n") + .map((line) => { + if (line.startsWith("+++") || line.startsWith("---")) { + // File headers - dim + return `${colors.dim}${line}${colors.reset}` + } else if (line.startsWith("@@")) { + // Hunk headers - cyan + return `${colors.cyan}${line}${colors.reset}` + } else if (line.startsWith("+")) { + // Added lines - green + return `${colors.green}${line}${colors.reset}` + } else if (line.startsWith("-")) { + // Removed lines - red + return `${colors.red}${line}${colors.reset}` + } else { + // Context lines - dim + return `${colors.dim}${line}${colors.reset}` + } + }) + .join("\n") +} + +// Human-readable formatter for stream parts +function formatPartHumanReadable(part: StreamPart, verbose: boolean = false): string | null { + switch (part.type) { + case "data-session": + return `${colors.dim}Session: ${part.data.id}${colors.reset}\n` + + case "start": + return null // Suppress start message + + case "text-start": + return null // Text will be shown via deltas + + case "text-delta": + needsNewlineBeforeBlock = true // We're mid-text + return part.delta + + case "text-end": + needsNewlineBeforeBlock = false + return "\n" + + case "tool-input-available": { + // Add newline if we were mid-text + const prefix = needsNewlineBeforeBlock ? 
"\n" : "" + needsNewlineBeforeBlock = false + + // Format tool-specific preview + let preview = "" + const input = part.input as Record + + switch (part.toolName) { + case "bash": + // Show the command being run + if (input.command) { + const cmd = String(input.command) + const truncated = cmd.length > 80 ? cmd.slice(0, 77) + "..." : cmd + preview = ` ${colors.gray}${truncated}${colors.reset}` + } + break + case "read": + case "write": + case "edit": + case "glob": + // Show the file path or pattern + if (input.file_path) { + preview = ` ${colors.gray}${input.file_path}${colors.reset}` + } else if (input.pattern) { + preview = ` ${colors.gray}${input.pattern}${colors.reset}` + } + break + case "list": + // Show the directory being listed + if (input.path) { + preview = ` ${colors.gray}${input.path}${colors.reset}` + } else { + preview = ` ${colors.gray}.${colors.reset}` + } + break + case "grep": + // Show the pattern being searched + if (input.pattern) { + preview = ` ${colors.gray}${input.pattern}${colors.reset}` + } + break + default: + // For other tools, show truncated JSON in verbose mode + if (verbose) { + preview = ` ${colors.gray}${JSON.stringify(input).slice(0, 80)}${colors.reset}` + } + } + + return `${prefix}${colors.cyan}▶ ${part.toolName}${colors.reset}${preview}\n` + } + + case "tool-output-available": { + const title = part.metadata?.title || part.toolCallId + let extra = "" + + // Show diff for edit operations (always, not just verbose) + if (part.metadata?.diff) { + extra = "\n" + formatDiff(String(part.metadata.diff)) + } else if (verbose && part.output) { + extra = `\n${colors.gray}${String(part.output).slice(0, 200)}${colors.reset}` + } + + return `${colors.green}✓ ${title}${colors.reset}${extra}\n` + } + + case "tool-output-error": + return `${colors.red}✗ Error: ${part.error}${colors.reset}\n` + + case "start-step": + return null // Suppress step markers + + case "finish-step": + return null // Suppress step markers + + case "finish": + 
return null // Final finish is handled by idle + + case "data-session-idle": { + // Add newline if we were mid-text + const prefix = needsNewlineBeforeBlock ? "\n" : "" + needsNewlineBeforeBlock = false + return `${prefix}${colors.dim}--- Session idle ---${colors.reset}\n` + } + + case "data-todo": + if (verbose) { + const todoList = part.data.todos + .map((t) => ` [${t.status}] ${t.content}`) + .join("\n") + return `${colors.yellow}Todos:${colors.reset}\n${todoList}\n` + } + return null + + case "data-subagent-context": + if (verbose) { + return `${colors.dim}[Subagent depth=${part.data.depth} type=${part.data.agentType}]${colors.reset}\n` + } + return null + + case "error": + return `${colors.red}Error: ${part.error}${colors.reset}\n` + + default: + return null + } +} + +async function main(): Promise { + const args = parseArgs(process.argv) + + if (args.help) { + printHelp() + process.exit(0) + } + + if (args.createConfig) { + console.log(writeConfigTemplate()) + process.exit(0) + } + + // Default to ./kiana.jsonc if no config specified + const configPath = args.config || "./kiana.jsonc" + + if (!fs.existsSync(configPath)) { + if (args.config) { + console.error(`Error: Config file not found: ${configPath}`) + } else { + console.error("Error: No config file found. Either:") + console.error(" - Create ./kiana.jsonc in the current directory") + console.error(" - Specify a config file with --config ") + console.error(" - Generate a template with --create-config > kiana.jsonc") + } + process.exit(1) + } + + // Load config + const config = loadConfig(configPath) + + // Create language model + const model = createLanguageModel(config.provider) + + // Create agent + const agent = new CodingAgent({ + model, + workingDirectory: config.workingDirectory || process.cwd(), + instructions: config.systemPrompt, + tools: config.tools ?? 
undefined, + maxSteps: config.maxSteps, + maxRetries: config.maxRetries, + sessionDir: args.session, + }) + + // Set up output handler + const outputPart = (part: StreamPart) => { + if (args.humanReadable) { + const formatted = formatPartHumanReadable(part, args.verbose) + if (formatted) { + process.stdout.write(formatted) + } + } else { + // SSE format + process.stdout.write(formatSSE(part)) + } + } + + // Subscribe to stream parts + agent.onStream(outputPart) + + // Handle signals + process.on("SIGINT", () => { + agent.abort() + process.exit(0) + }) + + process.on("SIGTERM", () => { + agent.abort() + process.exit(0) + }) + + // If --prompt is provided, send it and exit after completion + if (args.prompt) { + // Print prompt in human-readable mode + if (args.humanReadable) { + console.log(args.prompt) + } + + if (config.streaming !== false) { + const result = await agent.stream({ prompt: args.prompt }) + // Consume the text stream to trigger events + for await (const _chunk of result.textStream) { + // Events are emitted via onStream callback + } + // Wait for completion + await result.text + } else { + await agent.generate({ prompt: args.prompt }) + } + + // Output [DONE] in SSE mode + if (!args.humanReadable) { + process.stdout.write("data: [DONE]\n\n") + } + + process.exit(0) + } + + // Otherwise, read commands from stdin (JSON protocol) + const rl = readline.createInterface({ + input: process.stdin, + output: undefined, + terminal: false, + }) + + let pendingPromise: Promise | null = null + + rl.on("line", async (line) => { + if (!line.trim()) return + + try { + const command = JSON.parse(line) + + switch (command.type) { + case "message": + if (pendingPromise) { + console.error(JSON.stringify({ error: "Already processing a message" })) + return + } + + pendingPromise = (async () => { + try { + if (config.streaming !== false) { + const result = await agent.stream({ prompt: command.text }) + for await (const _chunk of result.textStream) { + // Events emitted 
via callback + } + await result.text + } else { + await agent.generate({ prompt: command.text }) + } + } catch (err) { + const errorPart: StreamPart = { + type: "error", + error: err instanceof Error ? err.message : String(err), + } + outputPart(errorPart) + } finally { + pendingPromise = null + } + })() + break + + case "abort": + agent.abort() + break + + default: + console.error(JSON.stringify({ error: `Unknown command type: ${command.type}` })) + } + } catch (err) { + console.error(JSON.stringify({ error: `Invalid JSON: ${err}` })) + } + }) + + rl.on("close", () => { + agent.abort() + process.exit(0) + }) +} + +main().catch((err) => { + console.error("Fatal error:", err) + process.exit(1) +}) diff --git a/packages/kiana-v6/src/config.ts b/packages/kiana-v6/src/config.ts new file mode 100644 index 00000000000..6b41cbfd8fb --- /dev/null +++ b/packages/kiana-v6/src/config.ts @@ -0,0 +1,123 @@ +import { z } from "zod" +import { readFileSync, writeFileSync } from "node:fs" +import { parse as parseJsonc } from "jsonc-parser" + +export const ProviderConfigSchema = z.object({ + type: z.enum(["anthropic", "openai", "openai-compatible", "google"]), + apiKey: z.string(), + model: z.string(), + baseUrl: z.string().nullable().optional(), +}) + +export type ProviderConfig = z.infer + +export const ConfigSchema = z.object({ + provider: ProviderConfigSchema, + systemPrompt: z.string().optional(), + workingDirectory: z.string().nullable().optional(), + tools: z.array(z.string()).nullable().optional(), + // Maximum number of steps in the agent loop (default: 50) + maxSteps: z.number().int().min(1).max(100).optional().default(50), + // Maximum number of retries for rate limit errors (default: 5) + // Uses exponential backoff and respects Retry-After headers + maxRetries: z.number().int().min(0).max(10).optional().default(5), + // Enable streaming mode (default: true) + streaming: z.boolean().optional().default(true), +}) + +export type Config = z.infer + +/** + * Load and 
validate config from a JSONC file. + */ +export function loadConfig(path: string): Config { + const content = readFileSync(path, "utf-8") + const parsed = parseJsonc(content) + + const result = ConfigSchema.safeParse(parsed) + if (!result.success) { + const errors = result.error.issues + .map((e: z.ZodIssue) => ` ${e.path.join(".")}: ${e.message}`) + .join("\n") + throw new Error(`Invalid config at ${path}:\n${errors}`) + } + + return result.data +} + +/** + * Default system prompt for Kiana headless mode. + */ +export const DEFAULT_SYSTEM_PROMPT = `You are Kiana, a powerful coding agent running in headless mode. + +You help users with software engineering tasks using the tools available to you. You are running non-interactively, so you cannot ask clarifying questions - make reasonable assumptions and proceed. + +IMPORTANT: You must NEVER generate or guess URLs unless you are confident they are for programming help. + +# Tone and style +- No emojis unless explicitly requested. +- Responses should be short and concise. +- Use Github-flavored markdown for formatting. +- Output text to communicate; use tools only to complete tasks. +- NEVER create files unless absolutely necessary. Prefer editing existing files. + +# Professional objectivity +Prioritize technical accuracy over validation. Focus on facts and problem-solving with direct, objective technical info. + +# Non-interactive mode +Since you are running headless without user interaction: +- Do not ask for clarification - make reasonable assumptions +- Complete tasks autonomously +- Report progress and results via tool outputs + +# Tool usage +- Call multiple tools in parallel when independent. +- Use specialized tools (Read, Write, Edit, Glob, Grep) instead of bash equivalents. +- CRITICAL: When calling tools, pass arguments as a proper JSON object, NOT as a string. 
+ - Correct: {"command": "git status", "description": "Check git status"} + - Wrong: "{\"command\": \"git status\", \"description\": \"Check git status\"}" +- Never stringify or escape the arguments object - pass it directly as structured data. + +# Code References +When referencing code, include \`file_path:line_number\` for navigation.` + +/** + * Config template for --create-config flag. + */ +export const CONFIG_TEMPLATE = `{ + // Provider configuration + "provider": { + "type": "anthropic", // "anthropic" | "openai" | "openai-compatible" | "google" + "apiKey": "YOUR_API_KEY", + "model": "claude-sonnet-4-20250514", + "baseUrl": null // Required for openai-compatible + }, + + // System prompt (optional, has sensible default) + "systemPrompt": null, + + // Working directory for tools (optional, defaults to current dir) + "workingDirectory": null, + + // Tool whitelist (optional, null = all tools enabled) + // Available: bash, read, write, edit, glob, grep, list, webfetch, websearch, codesearch, todowrite, todoread, task + "tools": null, + + // Maximum steps in agent loop (default: 50) + "maxSteps": 50, + + // Maximum retries for rate limit errors (default: 5) + // Uses exponential backoff and respects Retry-After headers + "maxRetries": 5 +} +` + +/** + * Get config template string (or write to file if path provided). 
+ */ +export function writeConfigTemplate(path?: string): string { + if (path) { + writeFileSync(path, CONFIG_TEMPLATE, "utf-8") + } + return CONFIG_TEMPLATE +} diff --git a/packages/kiana-v6/src/event.ts b/packages/kiana-v6/src/event.ts new file mode 100644 index 00000000000..5ed488c26a5 --- /dev/null +++ b/packages/kiana-v6/src/event.ts @@ -0,0 +1,111 @@ +import type { Event, EventType, SessionEvent, MessageEvent, PartEvent, TodoEvent } from "./types/event.js" + +// Re-export types for convenience +export type { Event as EventTypes, EventType, SessionEvent, MessageEvent, PartEvent, TodoEvent } + +type EventCallback = (event: Event) => void +type TypedCallback = ( + event: Extract +) => void + +/** + * Simple typed event bus for Kiana events. + * Supports both typed subscriptions (by event type) and global subscriptions. + */ +export class EventBus { + private listeners: Map> = new Map() + + /** + * Subscribe to a specific event type. + * Returns an unsubscribe function. + */ + subscribe(callback: EventCallback): () => void + subscribe(type: T, callback: TypedCallback): () => void + subscribe( + typeOrCallback: T | EventCallback, + callback?: TypedCallback + ): () => void { + if (typeof typeOrCallback === "function") { + // Global subscription + return this.subscribeAll(typeOrCallback) + } + + // Type-specific subscription + const type = typeOrCallback + const cb = callback! + const listeners = this.listeners.get(type) ?? new Set() + listeners.add(cb as EventCallback) + this.listeners.set(type, listeners) + + return () => { + listeners.delete(cb as EventCallback) + if (listeners.size === 0) { + this.listeners.delete(type) + } + } + } + + /** + * Subscribe to all events. + * Returns an unsubscribe function. + */ + subscribeAll(callback: EventCallback): () => void { + const listeners = this.listeners.get("*") ?? 
new Set() + listeners.add(callback) + this.listeners.set("*", listeners) + + return () => { + listeners.delete(callback) + if (listeners.size === 0) { + this.listeners.delete("*") + } + } + } + + /** + * Publish an event to all matching subscribers. + * Alias: emit + */ + publish(event: Event): void { + // Notify type-specific listeners + const typeListeners = this.listeners.get(event.type) + if (typeListeners) { + for (const callback of typeListeners) { + try { + callback(event) + } catch (err) { + console.error(`Error in event listener for ${event.type}:`, err) + } + } + } + + // Notify global listeners + const globalListeners = this.listeners.get("*") + if (globalListeners) { + for (const callback of globalListeners) { + try { + callback(event) + } catch (err) { + console.error(`Error in global event listener:`, err) + } + } + } + } + + /** + * Emit an event (alias for publish). + */ + emit(event: Event): void { + this.publish(event) + } + + /** + * Remove all listeners. + */ + clear(): void { + this.listeners.clear() + } +} + +// Default singleton instance +export const eventBus = new EventBus() diff --git a/packages/kiana-v6/src/index.ts b/packages/kiana-v6/src/index.ts new file mode 100644 index 00000000000..65092df37bb --- /dev/null +++ b/packages/kiana-v6/src/index.ts @@ -0,0 +1,106 @@ +// Version +export const VERSION = "0.1.0" + +// Main exports +export { + CodingAgent, + type CodingAgentConfig, + type GenerateParams, + type StreamParams, + type StreamCallback, + type StreamPart, + formatSSE, + parseSSE, + createSSEHeaders, +} from "./agent.js" + +// Stream types (AI SDK UI compatible) +export type { + StartPart, + TextStartPart, + TextDeltaPart, + TextEndPart, + ReasoningStartPart, + ReasoningDeltaPart, + ReasoningEndPart, + ToolInputStartPart, + ToolInputDeltaPart, + ToolInputAvailablePart, + ToolOutputAvailablePart, + ToolOutputErrorPart, + StartStepPart, + FinishStepPart, + FinishPart, + DataSessionPart, + DataSessionIdlePart, + DataTodoPart, + 
DataSubagentContextPart, + ErrorPart, + MessageMetadata, + ToolMetadata, + TokenUsage as StreamTokenUsage, +} from "./stream.js" + +// Config +export { loadConfig, writeConfigTemplate, DEFAULT_SYSTEM_PROMPT, type Config } from "./config.js" + +// Provider +export { createLanguageModel } from "./provider.js" + +// Event system (legacy - prefer StreamPart for new code) +export { EventBus, type EventTypes } from "./event.js" + +// Tools +export { + defineTool, + type Tool, + type ToolContext, + type ToolResult, + ALL_TOOLS, + DEFAULT_TOOLS, + TOOL_CATEGORIES, + getTools, + getToolsExcept, + // Individual tools + bashTool, + readTool, + writeTool, + editTool, + globTool, + grepTool, + listTool, + webfetchTool, + websearchTool, + codesearchTool, + todoWriteTool, + todoReadTool, + taskTool, + invalidTool, + // Todo helpers + getTodos, + setTodos, + type TodoInfo, + // Task helpers + setSubagentExecutor, + getSubagentExecutor, + getAgentConfig, + getAvailableAgentTypes, + type SubagentExecutor, +} from "./tool/index.js" + +// Types +export type { + SessionInfo, + FileDiff, + SessionSummary, + MessageInfo, + TokenUsage, + MessageError, + Part, + TextPart, + ReasoningPart, + ToolPart, + ToolState, + StepStartPart, + StepFinishPart, +} from "./types/index.js" diff --git a/packages/kiana-v6/src/provider.ts b/packages/kiana-v6/src/provider.ts new file mode 100644 index 00000000000..8f2734cb7db --- /dev/null +++ b/packages/kiana-v6/src/provider.ts @@ -0,0 +1,58 @@ +import { createAnthropic } from "@ai-sdk/anthropic" +import { createOpenAI } from "@ai-sdk/openai" +import { createOpenAICompatible } from "@ai-sdk/openai-compatible" +import { createGoogleGenerativeAI } from "@ai-sdk/google" +import type { LanguageModel } from "ai" +import type { ProviderConfig } from "./config.js" + +/** + * Create a language model from provider config. 
+ */ +export function createLanguageModel(config: ProviderConfig): LanguageModel { + switch (config.type) { + case "anthropic": { + const anthropic = createAnthropic({ + apiKey: config.apiKey, + baseURL: config.baseUrl ?? undefined, + }) + return anthropic(config.model) + } + + case "openai": { + const openai = createOpenAI({ + apiKey: config.apiKey, + baseURL: config.baseUrl ?? undefined, + }) + return openai(config.model) + } + + case "openai-compatible": { + if (!config.baseUrl) { + throw new Error("baseUrl is required for openai-compatible provider") + } + // Use openai-compatible provider for APIs like Ark, Moonshot, etc. + // includeUsage: true sends stream_options.include_usage to get token counts in streaming mode + // (requires provider support for OpenAI's streaming usage extension) + const compatible = createOpenAICompatible({ + name: "openai-compatible", + apiKey: config.apiKey, + baseURL: config.baseUrl, + includeUsage: true, + }) + return compatible(config.model) + } + + case "google": { + const google = createGoogleGenerativeAI({ + apiKey: config.apiKey, + baseURL: config.baseUrl ?? undefined, + }) + return google(config.model) + } + + default: { + const _exhaustive: never = config.type + throw new Error(`Unknown provider type: ${_exhaustive}`) + } + } +} diff --git a/packages/kiana-v6/src/stream.ts b/packages/kiana-v6/src/stream.ts new file mode 100644 index 00000000000..1245f1a7c21 --- /dev/null +++ b/packages/kiana-v6/src/stream.ts @@ -0,0 +1,288 @@ +/** + * AI SDK UI-compatible stream protocol types and utilities. + * + * This implements the Vercel AI SDK's UIMessageStream protocol for compatibility + * with useChat and other AI SDK UI components. 
+ * + * @see https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol + */ + +import type { SessionInfo } from "./types/session.js" + +// ============================================================================ +// Stream Part Types (wire format) +// ============================================================================ + +/** Message start - indicates beginning of a new message */ +export interface StartPart { + type: "start" + messageId: string + metadata?: MessageMetadata +} + +/** Text streaming start */ +export interface TextStartPart { + type: "text-start" + id: string +} + +/** Incremental text content */ +export interface TextDeltaPart { + type: "text-delta" + id: string + delta: string +} + +/** Text streaming complete */ +export interface TextEndPart { + type: "text-end" + id: string +} + +/** Reasoning streaming start */ +export interface ReasoningStartPart { + type: "reasoning-start" + id: string +} + +/** Incremental reasoning content */ +export interface ReasoningDeltaPart { + type: "reasoning-delta" + id: string + delta: string +} + +/** Reasoning streaming complete */ +export interface ReasoningEndPart { + type: "reasoning-end" + id: string +} + +/** Tool input streaming start */ +export interface ToolInputStartPart { + type: "tool-input-start" + toolCallId: string + toolName: string +} + +/** Tool input delta */ +export interface ToolInputDeltaPart { + type: "tool-input-delta" + toolCallId: string + inputTextDelta: string +} + +/** Tool input fully available - ready to execute */ +export interface ToolInputAvailablePart { + type: "tool-input-available" + toolCallId: string + toolName: string + input: Record +} + +/** Tool output available - execution complete */ +export interface ToolOutputAvailablePart { + type: "tool-output-available" + toolCallId: string + output: unknown + metadata?: ToolMetadata +} + +/** Tool execution error */ +export interface ToolOutputErrorPart { + type: "tool-output-error" + toolCallId: string + error: string +} + 
+/** Step start marker */ +export interface StartStepPart { + type: "start-step" +} + +/** Step finish marker */ +export interface FinishStepPart { + type: "finish-step" + usage?: TokenUsage +} + +/** Message finish marker */ +export interface FinishPart { + type: "finish" + finishReason?: "stop" | "tool-calls" | "length" | "content-filter" | "error" + usage?: TokenUsage +} + +/** Custom data part for session info */ +export interface DataSessionPart { + type: "data-session" + data: SessionInfo +} + +/** Custom data part for session idle */ +export interface DataSessionIdlePart { + type: "data-session-idle" + data: { sessionID: string } +} + +/** Custom data part for todos */ +export interface DataTodoPart { + type: "data-todo" + data: { + sessionID: string + todos: Array<{ + content: string + status: "pending" | "in_progress" | "completed" + activeForm: string + }> + } +} + +/** Custom data part for subagent context */ +export interface DataSubagentContextPart { + type: "data-subagent-context" + data: { + parentSessionID: string + depth: number + agentType?: string + } +} + +/** Error part */ +export interface ErrorPart { + type: "error" + error: string +} + +/** Stream termination marker (not JSON, literal string) */ +export type DonePart = "[DONE]" + +// ============================================================================ +// Union of all stream parts +// ============================================================================ + +export type StreamPart = + | StartPart + | TextStartPart + | TextDeltaPart + | TextEndPart + | ReasoningStartPart + | ReasoningDeltaPart + | ReasoningEndPart + | ToolInputStartPart + | ToolInputDeltaPart + | ToolInputAvailablePart + | ToolOutputAvailablePart + | ToolOutputErrorPart + | StartStepPart + | FinishStepPart + | FinishPart + | DataSessionPart + | DataSessionIdlePart + | DataTodoPart + | DataSubagentContextPart + | ErrorPart + +// ============================================================================ +// 
Metadata types +// ============================================================================ + +export interface MessageMetadata { + sessionID?: string + parentMessageID?: string + model?: string + [key: string]: unknown +} + +export interface ToolMetadata { + title?: string + executionTime?: number + [key: string]: unknown +} + +export interface TokenUsage { + inputTokens?: number + outputTokens?: number + reasoningTokens?: number + cacheReadTokens?: number + cacheWriteTokens?: number +} + +// ============================================================================ +// Stream Writer +// ============================================================================ + +export interface StreamWriter { + /** Write a stream part */ + write(part: StreamPart): void + /** Write the [DONE] termination marker */ + done(): void + /** Close the stream */ + close(): void +} + +/** + * Create a stream writer that formats parts as SSE. + */ +export function createStreamWriter( + writable: WritableStreamDefaultWriter +): StreamWriter { + const encoder = new TextEncoder() + + return { + write(part: StreamPart) { + const line = `data: ${JSON.stringify(part)}\n\n` + writable.write(encoder.encode(line)) + }, + done() { + writable.write(encoder.encode("data: [DONE]\n\n")) + }, + close() { + writable.close() + }, + } +} + +/** + * Format a stream part as an SSE line (for CLI/non-HTTP usage). + */ +export function formatSSE(part: StreamPart | DonePart): string { + if (part === "[DONE]") { + return "data: [DONE]\n\n" + } + return `data: ${JSON.stringify(part)}\n\n` +} + +/** + * Parse an SSE line back to a stream part. 
+ */ +export function parseSSE(line: string): StreamPart | DonePart | null { + if (!line.startsWith("data: ")) { + return null + } + const data = line.slice(6).trim() + if (data === "[DONE]") { + return "[DONE]" + } + try { + return JSON.parse(data) as StreamPart + } catch { + return null + } +} + +// ============================================================================ +// HTTP Response helpers (for web server usage) +// ============================================================================ + +/** + * Create SSE headers for AI SDK UI stream protocol. + */ +export function createSSEHeaders(): Record { + return { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "x-vercel-ai-ui-message-stream": "v1", + } +} diff --git a/packages/kiana-v6/src/tool/bash.ts b/packages/kiana-v6/src/tool/bash.ts new file mode 100644 index 00000000000..4f3f3d8f24d --- /dev/null +++ b/packages/kiana-v6/src/tool/bash.ts @@ -0,0 +1,252 @@ +import { z } from "zod" +import { spawn } from "node:child_process" +import { defineTool } from "./tool.js" + +const DEFAULT_MAX_OUTPUT_LENGTH = 30_000 +const MAX_OUTPUT_LENGTH = (() => { + const parsed = Number(process.env.KIANA_BASH_MAX_OUTPUT_LENGTH) + return Number.isInteger(parsed) && parsed > 0 ? parsed : DEFAULT_MAX_OUTPUT_LENGTH +})() +const DEFAULT_TIMEOUT = 2 * 60 * 1000 // 2 minutes +const MAX_TIMEOUT = 10 * 60 * 1000 // 10 minutes +const SIGKILL_TIMEOUT_MS = 200 + +const DESCRIPTION = `Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures. + +Before executing the command, please follow these steps: + +1. Directory Verification: + - If the command will create new directories or files, first use the List tool to verify the parent directory exists and is the correct location + - For example, before running "mkdir foo/bar", first use List to check that "foo" exists and is the intended parent directory + +2. 
Command Execution: + - Always quote file paths that contain spaces with double quotes (e.g., cd "path with spaces/file.txt") + - Examples of proper quoting: + - cd "/Users/name/My Documents" (correct) + - cd /Users/name/My Documents (incorrect - will fail) + - python "/path/with spaces/script.py" (correct) + - python /path/with spaces/script.py (incorrect - will fail) + - After ensuring proper quoting, execute the command. + - Capture the output of the command. + +Usage notes: + - The command argument is required. + - You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). If not specified, commands will timeout after 120000ms (2 minutes). + - It is very helpful if you write a clear, concise description of what this command does in 5-10 words. + - If the output exceeds 30000 characters, output will be truncated before being returned to you. + - VERY IMPORTANT: You MUST avoid using search commands like \`find\` and \`grep\`. Instead use Grep, Glob, or Task to search. You MUST avoid read tools like \`cat\`, \`head\`, \`tail\`, and \`ls\`, and use Read and List to read files. + - If you _still_ need to run \`grep\`, STOP. ALWAYS USE ripgrep at \`rg\` (or /usr/bin/rg) first, which all users have pre-installed. + - When issuing multiple commands, use the ';' or '&&' operator to separate them. DO NOT use newlines (newlines are ok in quoted strings). + - Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of \`cd\`. You may use \`cd\` if the User explicitly requests it. + + pytest /foo/bar/tests + + + cd /foo/bar && pytest tests + + + +# Committing changes with git + +IMPORTANT: ONLY COMMIT IF THE USER ASKS YOU TO. + +If and only if the user asks you to create a new git commit, follow these steps carefully: + +1. You have the capability to call multiple tools in a single response. 
When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. ALWAYS run the following bash commands in parallel, each using the Bash tool: + - Run a git status command to see all untracked files. + - Run a git diff command to see both staged and unstaged changes that will be committed. + - Run a git log command to see recent commit messages, so that you can follow this repository's commit message style. + +2. Analyze all staged changes (both previously staged and newly added) and draft a commit message. + +3. Create the commit with a meaningful message. + +Important notes: +- Use the git context at the start of this conversation to determine which files are relevant to your commit. +- NEVER update the git config +- DO NOT push to the remote repository +- IMPORTANT: Never use git commands with the -i flag (like git rebase -i or git add -i) since they require interactive input which is not supported. +- If there are no changes to commit (i.e., no untracked files and no modifications), do not create an empty commit + +# Creating pull requests +Use the gh command via the Bash tool for ALL GitHub-related tasks including working with issues, pull requests, checks, and releases.` + +export const bashTool = defineTool("bash", { + description: DESCRIPTION, + parameters: z.object({ + command: z.string().describe("The command to execute"), + timeout: z.number().describe("Optional timeout in milliseconds").optional(), + description: z + .string() + .describe( + "Clear, concise description of what this command does in 5-10 words. Examples:\nInput: ls\nOutput: Lists files in current directory\n\nInput: git status\nOutput: Shows working tree status\n\nInput: npm install\nOutput: Installs package dependencies\n\nInput: mkdir foo\nOutput: Creates directory 'foo'" + ), + }), + async execute(params, ctx) { + if (params.timeout !== undefined && params.timeout < 0) { + throw new Error(`Invalid timeout value: ${params.timeout}. 
Timeout must be a positive number.`) + } + const timeout = Math.min(params.timeout ?? DEFAULT_TIMEOUT, MAX_TIMEOUT) + + // Determine shell to use + const shell = (() => { + const s = process.env.SHELL + if (s) { + // Avoid fish and nu shells + if (!new Set(["/bin/fish", "/bin/nu", "/usr/bin/fish", "/usr/bin/nu"]).has(s)) { + return s + } + } + + if (process.platform === "darwin") { + return "/bin/zsh" + } + + if (process.platform === "win32") { + return process.env.COMSPEC || "cmd.exe" + } + + // Try to find bash + return "/bin/bash" + })() + + const proc = spawn(params.command, { + shell, + cwd: ctx.workingDirectory, + env: { + ...process.env, + }, + stdio: ["ignore", "pipe", "pipe"], + detached: process.platform !== "win32", + }) + + let output = "" + + // Initialize metadata with empty output + ctx.metadata({ + metadata: { + output: "", + description: params.description, + }, + }) + + const append = (chunk: Buffer) => { + if (output.length <= MAX_OUTPUT_LENGTH) { + output += chunk.toString() + ctx.metadata({ + metadata: { + output, + description: params.description, + }, + }) + } + } + + proc.stdout?.on("data", append) + proc.stderr?.on("data", append) + + let timedOut = false + let aborted = false + let exited = false + + const killTree = async () => { + const pid = proc.pid + if (!pid || exited) { + return + } + + if (process.platform === "win32") { + await new Promise((resolve) => { + const killer = spawn("taskkill", ["/pid", String(pid), "/f", "/t"], { stdio: "ignore" }) + killer.once("exit", resolve) + killer.once("error", () => resolve()) + }) + return + } + + try { + process.kill(-pid, "SIGTERM") + await sleep(SIGKILL_TIMEOUT_MS) + if (!exited) { + process.kill(-pid, "SIGKILL") + } + } catch { + proc.kill("SIGTERM") + await sleep(SIGKILL_TIMEOUT_MS) + if (!exited) { + proc.kill("SIGKILL") + } + } + } + + if (ctx.abort.aborted) { + aborted = true + await killTree() + } + + const abortHandler = () => { + aborted = true + void killTree() + } + + 
ctx.abort.addEventListener("abort", abortHandler, { once: true }) + + const timeoutTimer = setTimeout(() => { + timedOut = true + void killTree() + }, timeout + 100) + + await new Promise((resolve, reject) => { + const cleanup = () => { + clearTimeout(timeoutTimer) + ctx.abort.removeEventListener("abort", abortHandler) + } + + proc.once("exit", () => { + exited = true + cleanup() + resolve() + }) + + proc.once("error", (error) => { + exited = true + cleanup() + reject(error) + }) + }) + + const resultMetadata: string[] = [""] + + if (output.length > MAX_OUTPUT_LENGTH) { + output = output.slice(0, MAX_OUTPUT_LENGTH) + resultMetadata.push(`bash tool truncated output as it exceeded ${MAX_OUTPUT_LENGTH} char limit`) + } + + if (timedOut) { + resultMetadata.push(`bash tool terminated command after exceeding timeout ${timeout} ms`) + } + + if (aborted) { + resultMetadata.push("User aborted the command") + } + + if (resultMetadata.length > 1) { + resultMetadata.push("") + output += "\n\n" + resultMetadata.join("\n") + } + + return { + title: params.description, + metadata: { + output, + exit: proc.exitCode, + description: params.description, + }, + output, + } + }, +}) + +function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)) +} diff --git a/packages/kiana-v6/src/tool/codesearch.ts b/packages/kiana-v6/src/tool/codesearch.ts new file mode 100644 index 00000000000..0d407e01af6 --- /dev/null +++ b/packages/kiana-v6/src/tool/codesearch.ts @@ -0,0 +1,135 @@ +import { z } from "zod" +import { defineTool } from "./tool.js" + +const DESCRIPTION = `- Search and get relevant context for any programming task using Exa Code API +- Provides the highest quality and freshest context for libraries, SDKs, and APIs +- Use this tool for ANY question or task related to programming +- Returns comprehensive code examples, documentation, and API references +- Optimized for finding specific programming patterns and solutions + +Usage notes: + - 
Adjustable token count (1000-50000) for focused or comprehensive results + - Default 5000 tokens provides balanced context for most queries + - Use lower values for specific questions, higher values for comprehensive documentation + - Supports queries about frameworks, libraries, APIs, and programming concepts + - Examples: 'React useState hook examples', 'Python pandas dataframe filtering', 'Express.js middleware'` + +const API_CONFIG = { + BASE_URL: "https://mcp.exa.ai", + ENDPOINTS: { + CONTEXT: "/mcp", + }, +} as const + +interface McpCodeRequest { + jsonrpc: string + id: number + method: string + params: { + name: string + arguments: { + query: string + tokensNum: number + } + } +} + +interface McpCodeResponse { + jsonrpc: string + result: { + content: Array<{ + type: string + text: string + }> + } +} + +export const codesearchTool = defineTool("codesearch", { + description: DESCRIPTION, + parameters: z.object({ + query: z + .string() + .describe( + "Search query to find relevant context for APIs, Libraries, and SDKs. For example, 'React useState hook examples', 'Python pandas dataframe filtering', 'Express.js middleware', 'Next js partial prerendering configuration'" + ), + tokensNum: z + .number() + .min(1000) + .max(50000) + .default(5000) + .describe( + "Number of tokens to return (1000-50000). Default is 5000 tokens. Adjust this value based on how much context you need - use lower values for focused queries and higher values for comprehensive documentation." 
+ ), + }), + async execute(params, ctx) { + const codeRequest: McpCodeRequest = { + jsonrpc: "2.0", + id: 1, + method: "tools/call", + params: { + name: "get_code_context_exa", + arguments: { + query: params.query, + tokensNum: params.tokensNum || 5000, + }, + }, + } + + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), 30000) + + try { + const headers: Record<string, string> = { + accept: "application/json, text/event-stream", + "content-type": "application/json", + } + + const signals = ctx.abort ? [controller.signal, ctx.abort] : [controller.signal] + const response = await fetch(`${API_CONFIG.BASE_URL}${API_CONFIG.ENDPOINTS.CONTEXT}`, { + method: "POST", + headers, + body: JSON.stringify(codeRequest), + signal: AbortSignal.any(signals), + }) + + clearTimeout(timeoutId) + + if (!response.ok) { + const errorText = await response.text() + throw new Error(`Code search error (${response.status}): ${errorText}`) + } + + const responseText = await response.text() + + // Parse SSE response + const lines = responseText.split("\n") + for (const line of lines) { + if (line.startsWith("data: ")) { + const data: McpCodeResponse = JSON.parse(line.substring(6)) + if (data.result && data.result.content && data.result.content.length > 0) { + return { + output: data.result.content[0].text, + title: `Code search: ${params.query}`, + metadata: {}, + } + } + } + } + + return { + output: + "No code snippets or documentation found. 
Please try a different query, be more specific about the library or programming concept, or check the spelling of framework names.", + title: `Code search: ${params.query}`, + metadata: {}, + } + } catch (error) { + clearTimeout(timeoutId) + + if (error instanceof Error && error.name === "AbortError") { + throw new Error("Code search request timed out") + } + + throw error + } + }, +}) diff --git a/packages/kiana-v6/src/tool/edit.ts b/packages/kiana-v6/src/tool/edit.ts new file mode 100644 index 00000000000..43de87b5012 --- /dev/null +++ b/packages/kiana-v6/src/tool/edit.ts @@ -0,0 +1,648 @@ +// the approaches in this edit tool are sourced from +// https://github.com/cline/cline/blob/main/evals/diff-edits/diff-apply/diff-06-23-25.ts +// https://github.com/google-gemini/gemini-cli/blob/main/packages/core/src/utils/editCorrector.ts +// https://github.com/cline/cline/blob/main/evals/diff-edits/diff-apply/diff-06-26-25.ts + +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" + +const DESCRIPTION = `Performs exact string replacements in files. + +Usage: +- You must use your \`Read\` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file. +- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: spaces + line number + tab. Everything after that tab is the actual file content to match. Never include any part of the line number prefix in the oldString or newString. +- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required. +- Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked. +- The edit will FAIL if \`oldString\` is not found in the file with an error "oldString not found in content". 
+- The edit will FAIL if \`oldString\` is found multiple times in the file with an error "oldString found multiple times and requires more code context to uniquely identify the intended match". Either provide a larger string with more surrounding context to make it unique or use \`replaceAll\` to change every instance of \`oldString\`. +- Use \`replaceAll\` for replacing and renaming strings across the file. This parameter is useful if you want to rename a variable for instance.` + +export const editTool = defineTool("edit", { + description: DESCRIPTION, + parameters: z.object({ + filePath: z.string().describe("The absolute path to the file to modify"), + oldString: z.string().describe("The text to replace"), + newString: z + .string() + .describe("The text to replace it with (must be different from oldString)"), + replaceAll: z + .boolean() + .optional() + .describe("Replace all occurrences of oldString (default false)"), + }), + async execute(params, ctx) { + if (!params.filePath) { + throw new Error("filePath is required") + } + + if (params.oldString === params.newString) { + throw new Error("oldString and newString must be different") + } + + const filePath = path.isAbsolute(params.filePath) + ? 
params.filePath + : path.join(ctx.workingDirectory, params.filePath) + + let diff = "" + let contentOld = "" + let contentNew = "" + + // Handle creating new file with empty oldString + if (params.oldString === "") { + contentNew = params.newString + diff = createSimpleDiff(filePath, contentOld, contentNew) + + // Ensure parent directory exists + const dir = path.dirname(filePath) + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }) + } + + fs.writeFileSync(filePath, params.newString, "utf-8") + + return { + metadata: { + diagnostics: {}, + diff, + filediff: { + file: filePath, + before: contentOld, + after: contentNew, + additions: countLines(contentNew), + deletions: 0, + }, + }, + title: path.relative(ctx.workingDirectory, filePath), + output: "", + } + } + + // Read existing file + if (!fs.existsSync(filePath)) { + throw new Error(`File ${filePath} not found`) + } + + const stats = fs.statSync(filePath) + if (stats.isDirectory()) { + throw new Error(`Path is a directory, not a file: ${filePath}`) + } + + contentOld = fs.readFileSync(filePath, "utf-8") + contentNew = replace(contentOld, params.oldString, params.newString, params.replaceAll) + + diff = createSimpleDiff(filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew)) + + fs.writeFileSync(filePath, contentNew, "utf-8") + + // Re-read to get actual content + contentNew = fs.readFileSync(filePath, "utf-8") + diff = createSimpleDiff(filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew)) + + // Calculate additions/deletions + const oldLines = contentOld.split("\n") + const newLines = contentNew.split("\n") + let additions = 0 + let deletions = 0 + + // Simple diff counting + const oldSet = new Set(oldLines) + const newSet = new Set(newLines) + + for (const line of newLines) { + if (!oldSet.has(line)) additions++ + } + for (const line of oldLines) { + if (!newSet.has(line)) deletions++ + } + + return { + metadata: { + diagnostics: {}, + diff, + 
filediff: { + file: filePath, + before: contentOld, + after: contentNew, + additions, + deletions, + }, + }, + title: path.relative(ctx.workingDirectory, filePath), + output: "", + } + }, +}) + +function normalizeLineEndings(text: string): string { + return text.replaceAll("\r\n", "\n") +} + +function countLines(text: string): number { + return text.split("\n").length +} + +function createSimpleDiff(filePath: string, oldContent: string, newContent: string): string { + const oldLines = oldContent.split("\n") + const newLines = newContent.split("\n") + + const lines: string[] = [ + `--- ${filePath}`, + `+++ ${filePath}`, + ] + + // Simple unified diff format + let i = 0 + let j = 0 + + while (i < oldLines.length || j < newLines.length) { + if (i >= oldLines.length) { + lines.push(`+${newLines[j]}`) + j++ + } else if (j >= newLines.length) { + lines.push(`-${oldLines[i]}`) + i++ + } else if (oldLines[i] === newLines[j]) { + lines.push(` ${oldLines[i]}`) + i++ + j++ + } else { + // Find next matching line + let foundMatch = false + for (let lookAhead = 1; lookAhead < 5; lookAhead++) { + if (i + lookAhead < oldLines.length && oldLines[i + lookAhead] === newLines[j]) { + // Old lines were deleted + for (let k = 0; k < lookAhead; k++) { + lines.push(`-${oldLines[i + k]}`) + } + i += lookAhead + foundMatch = true + break + } + if (j + lookAhead < newLines.length && oldLines[i] === newLines[j + lookAhead]) { + // New lines were added + for (let k = 0; k < lookAhead; k++) { + lines.push(`+${newLines[j + k]}`) + } + j += lookAhead + foundMatch = true + break + } + } + + if (!foundMatch) { + lines.push(`-${oldLines[i]}`) + lines.push(`+${newLines[j]}`) + i++ + j++ + } + } + } + + return lines.join("\n") +} + +// Similarity thresholds for block anchor fallback matching +const SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0 +const MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3 + +/** + * Levenshtein distance algorithm implementation + */ +function levenshtein(a: string, b: string): 
number { + if (a === "" || b === "") { + return Math.max(a.length, b.length) + } + const matrix = Array.from({ length: a.length + 1 }, (_, i) => + Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0)) + ) + + for (let i = 1; i <= a.length; i++) { + for (let j = 1; j <= b.length; j++) { + const cost = a[i - 1] === b[j - 1] ? 0 : 1 + matrix[i][j] = Math.min( + matrix[i - 1][j] + 1, + matrix[i][j - 1] + 1, + matrix[i - 1][j - 1] + cost + ) + } + } + return matrix[a.length][b.length] +} + +export type Replacer = (content: string, find: string) => Generator<string, void, unknown> + +export const SimpleReplacer: Replacer = function* (_content, find) { + yield find +} + +export const LineTrimmedReplacer: Replacer = function* (content, find) { + const originalLines = content.split("\n") + const searchLines = find.split("\n") + + if (searchLines[searchLines.length - 1] === "") { + searchLines.pop() + } + + for (let i = 0; i <= originalLines.length - searchLines.length; i++) { + let matches = true + + for (let j = 0; j < searchLines.length; j++) { + const originalTrimmed = originalLines[i + j].trim() + const searchTrimmed = searchLines[j].trim() + + if (originalTrimmed !== searchTrimmed) { + matches = false + break + } + } + + if (matches) { + let matchStartIndex = 0 + for (let k = 0; k < i; k++) { + matchStartIndex += originalLines[k].length + 1 + } + + let matchEndIndex = matchStartIndex + for (let k = 0; k < searchLines.length; k++) { + matchEndIndex += originalLines[i + k].length + if (k < searchLines.length - 1) { + matchEndIndex += 1 + } + } + + yield content.substring(matchStartIndex, matchEndIndex) + } + } +} + +export const BlockAnchorReplacer: Replacer = function* (content, find) { + const originalLines = content.split("\n") + const searchLines = find.split("\n") + + if (searchLines.length < 3) { + return + } + + if (searchLines[searchLines.length - 1] === "") { + searchLines.pop() + } + + const firstLineSearch = searchLines[0].trim() + const lastLineSearch = 
searchLines[searchLines.length - 1].trim() + const searchBlockSize = searchLines.length + + const candidates: Array<{ startLine: number; endLine: number }> = [] + for (let i = 0; i < originalLines.length; i++) { + if (originalLines[i].trim() !== firstLineSearch) { + continue + } + + for (let j = i + 2; j < originalLines.length; j++) { + if (originalLines[j].trim() === lastLineSearch) { + candidates.push({ startLine: i, endLine: j }) + break + } + } + } + + if (candidates.length === 0) { + return + } + + if (candidates.length === 1) { + const { startLine, endLine } = candidates[0] + const actualBlockSize = endLine - startLine + 1 + + let similarity = 0 + let linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2) + + if (linesToCheck > 0) { + for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) { + const originalLine = originalLines[startLine + j].trim() + const searchLine = searchLines[j].trim() + const maxLen = Math.max(originalLine.length, searchLine.length) + if (maxLen === 0) { + continue + } + const distance = levenshtein(originalLine, searchLine) + similarity += (1 - distance / maxLen) / linesToCheck + + if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) { + break + } + } + } else { + similarity = 1.0 + } + + if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) { + let matchStartIndex = 0 + for (let k = 0; k < startLine; k++) { + matchStartIndex += originalLines[k].length + 1 + } + let matchEndIndex = matchStartIndex + for (let k = startLine; k <= endLine; k++) { + matchEndIndex += originalLines[k].length + if (k < endLine) { + matchEndIndex += 1 + } + } + yield content.substring(matchStartIndex, matchEndIndex) + } + return + } + + let bestMatch: { startLine: number; endLine: number } | null = null + let maxSimilarity = -1 + + for (const candidate of candidates) { + const { startLine, endLine } = candidate + const actualBlockSize = endLine - startLine + 1 + + let similarity = 0 + let linesToCheck = 
Math.min(searchBlockSize - 2, actualBlockSize - 2) + + if (linesToCheck > 0) { + for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) { + const originalLine = originalLines[startLine + j].trim() + const searchLine = searchLines[j].trim() + const maxLen = Math.max(originalLine.length, searchLine.length) + if (maxLen === 0) { + continue + } + const distance = levenshtein(originalLine, searchLine) + similarity += 1 - distance / maxLen + } + similarity /= linesToCheck + } else { + similarity = 1.0 + } + + if (similarity > maxSimilarity) { + maxSimilarity = similarity + bestMatch = candidate + } + } + + if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) { + const { startLine, endLine } = bestMatch + let matchStartIndex = 0 + for (let k = 0; k < startLine; k++) { + matchStartIndex += originalLines[k].length + 1 + } + let matchEndIndex = matchStartIndex + for (let k = startLine; k <= endLine; k++) { + matchEndIndex += originalLines[k].length + if (k < endLine) { + matchEndIndex += 1 + } + } + yield content.substring(matchStartIndex, matchEndIndex) + } +} + +export const WhitespaceNormalizedReplacer: Replacer = function* (content, find) { + const normalizeWhitespace = (text: string) => text.replace(/\s+/g, " ").trim() + const normalizedFind = normalizeWhitespace(find) + + const lines = content.split("\n") + for (let i = 0; i < lines.length; i++) { + const line = lines[i] + if (normalizeWhitespace(line) === normalizedFind) { + yield line + } else { + const normalizedLine = normalizeWhitespace(line) + if (normalizedLine.includes(normalizedFind)) { + const words = find.trim().split(/\s+/) + if (words.length > 0) { + const pattern = words.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")).join("\\s+") + try { + const regex = new RegExp(pattern) + const match = line.match(regex) + if (match) { + yield match[0] + } + } catch { + // Invalid regex pattern, skip + } + } + } + } + } + + const findLines = find.split("\n") + if 
(findLines.length > 1) { + for (let i = 0; i <= lines.length - findLines.length; i++) { + const block = lines.slice(i, i + findLines.length) + if (normalizeWhitespace(block.join("\n")) === normalizedFind) { + yield block.join("\n") + } + } + } +} + +export const IndentationFlexibleReplacer: Replacer = function* (content, find) { + const removeIndentation = (text: string) => { + const lines = text.split("\n") + const nonEmptyLines = lines.filter((line) => line.trim().length > 0) + if (nonEmptyLines.length === 0) return text + + const minIndent = Math.min( + ...nonEmptyLines.map((line) => { + const match = line.match(/^(\s*)/) + return match ? match[1].length : 0 + }) + ) + + return lines.map((line) => (line.trim().length === 0 ? line : line.slice(minIndent))).join("\n") + } + + const normalizedFind = removeIndentation(find) + const contentLines = content.split("\n") + const findLines = find.split("\n") + + for (let i = 0; i <= contentLines.length - findLines.length; i++) { + const block = contentLines.slice(i, i + findLines.length).join("\n") + if (removeIndentation(block) === normalizedFind) { + yield block + } + } +} + +export const EscapeNormalizedReplacer: Replacer = function* (content, find) { + const unescapeString = (str: string): string => { + return str.replace(/\\(n|t|r|'|"|`|\\|\n|\$)/g, (match, capturedChar) => { + switch (capturedChar) { + case "n": + return "\n" + case "t": + return "\t" + case "r": + return "\r" + case "'": + return "'" + case '"': + return '"' + case "`": + return "`" + case "\\": + return "\\" + case "\n": + return "\n" + case "$": + return "$" + default: + return match + } + }) + } + + const unescapedFind = unescapeString(find) + + if (content.includes(unescapedFind)) { + yield unescapedFind + } + + const lines = content.split("\n") + const findLines = unescapedFind.split("\n") + + for (let i = 0; i <= lines.length - findLines.length; i++) { + const block = lines.slice(i, i + findLines.length).join("\n") + const unescapedBlock = 
unescapeString(block) + + if (unescapedBlock === unescapedFind) { + yield block + } + } +} + +export const MultiOccurrenceReplacer: Replacer = function* (content, find) { + let startIndex = 0 + + while (true) { + const index = content.indexOf(find, startIndex) + if (index === -1) break + + yield find + startIndex = index + find.length + } +} + +export const TrimmedBoundaryReplacer: Replacer = function* (content, find) { + const trimmedFind = find.trim() + + if (trimmedFind === find) { + return + } + + if (content.includes(trimmedFind)) { + yield trimmedFind + } + + const lines = content.split("\n") + const findLines = find.split("\n") + + for (let i = 0; i <= lines.length - findLines.length; i++) { + const block = lines.slice(i, i + findLines.length).join("\n") + + if (block.trim() === trimmedFind) { + yield block + } + } +} + +export const ContextAwareReplacer: Replacer = function* (content, find) { + const findLines = find.split("\n") + if (findLines.length < 3) { + return + } + + if (findLines[findLines.length - 1] === "") { + findLines.pop() + } + + const contentLines = content.split("\n") + + const firstLine = findLines[0].trim() + const lastLine = findLines[findLines.length - 1].trim() + + for (let i = 0; i < contentLines.length; i++) { + if (contentLines[i].trim() !== firstLine) continue + + for (let j = i + 2; j < contentLines.length; j++) { + if (contentLines[j].trim() === lastLine) { + const blockLines = contentLines.slice(i, j + 1) + const block = blockLines.join("\n") + + if (blockLines.length === findLines.length) { + let matchingLines = 0 + let totalNonEmptyLines = 0 + + for (let k = 1; k < blockLines.length - 1; k++) { + const blockLine = blockLines[k].trim() + const findLine = findLines[k].trim() + + if (blockLine.length > 0 || findLine.length > 0) { + totalNonEmptyLines++ + if (blockLine === findLine) { + matchingLines++ + } + } + } + + if (totalNonEmptyLines === 0 || matchingLines / totalNonEmptyLines >= 0.5) { + yield block + break + } + } + 
break + } + } + } +} + +export function replace( + content: string, + oldString: string, + newString: string, + replaceAll = false +): string { + if (oldString === newString) { + throw new Error("oldString and newString must be different") + } + + let notFound = true + + for (const replacer of [ + SimpleReplacer, + LineTrimmedReplacer, + BlockAnchorReplacer, + WhitespaceNormalizedReplacer, + IndentationFlexibleReplacer, + EscapeNormalizedReplacer, + TrimmedBoundaryReplacer, + ContextAwareReplacer, + MultiOccurrenceReplacer, + ]) { + for (const search of replacer(content, oldString)) { + const index = content.indexOf(search) + if (index === -1) continue + notFound = false + if (replaceAll) { + return content.replaceAll(search, newString) + } + const lastIndex = content.lastIndexOf(search) + if (index !== lastIndex) continue + return content.substring(0, index) + newString + content.substring(index + search.length) + } + } + + if (notFound) { + throw new Error("oldString not found in content") + } + throw new Error( + "Found multiple matches for oldString. Provide more surrounding lines in oldString to identify the correct match." 
+ ) +} diff --git a/packages/kiana-v6/src/tool/glob.ts b/packages/kiana-v6/src/tool/glob.ts new file mode 100644 index 00000000000..f65f85d9f99 --- /dev/null +++ b/packages/kiana-v6/src/tool/glob.ts @@ -0,0 +1,78 @@ +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" +import { listFiles } from "../util/ripgrep.js" + +const DESCRIPTION = `- Fast file pattern matching tool that works with any codebase size +- Supports glob patterns like "**/*.js" or "src/**/*.ts" +- Returns matching file paths sorted by modification time +- Use this tool when you need to find files by name patterns +- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Task tool instead +- You have the capability to call multiple tools in a single response. It is always better to speculatively perform multiple searches as a batch that are potentially useful.` + +export const globTool = defineTool("glob", { + description: DESCRIPTION, + parameters: z.object({ + pattern: z.string().describe("The glob pattern to match files against"), + path: z + .string() + .optional() + .describe( + `The directory to search in. If not specified, the current working directory will be used. IMPORTANT: Omit this field to use the default directory. DO NOT enter "undefined" or "null" - simply omit it for the default behavior. Must be a valid directory path if provided.` + ), + }), + async execute(params, ctx) { + let search = params.path ?? ctx.workingDirectory + search = path.isAbsolute(search) ? 
search : path.resolve(ctx.workingDirectory, search) + + const limit = 100 + const files: Array<{ path: string; mtime: number }> = [] + let truncated = false + + for await (const file of listFiles({ + cwd: search, + glob: [params.pattern], + })) { + if (files.length >= limit) { + truncated = true + break + } + const full = path.resolve(search, file) + let mtime = 0 + try { + const stats = fs.statSync(full) + mtime = stats.mtime.getTime() + } catch { + // File might have been deleted + } + files.push({ + path: full, + mtime, + }) + } + + // Sort by modification time (newest first) + files.sort((a, b) => b.mtime - a.mtime) + + const output: string[] = [] + if (files.length === 0) { + output.push("No files found") + } else { + output.push(...files.map((f) => f.path)) + if (truncated) { + output.push("") + output.push("(Results are truncated. Consider using a more specific path or pattern.)") + } + } + + return { + title: path.relative(ctx.workingDirectory, search), + metadata: { + count: files.length, + truncated, + }, + output: output.join("\n"), + } + }, +}) diff --git a/packages/kiana-v6/src/tool/grep.ts b/packages/kiana-v6/src/tool/grep.ts new file mode 100644 index 00000000000..bec2583e903 --- /dev/null +++ b/packages/kiana-v6/src/tool/grep.ts @@ -0,0 +1,113 @@ +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" +import { searchContent } from "../util/ripgrep.js" + +const MAX_LINE_LENGTH = 2000 + +const DESCRIPTION = `- Fast content search tool that works with any codebase size +- Searches file contents using regular expressions +- Supports full regex syntax (eg. "log.*Error", "function\\s+\\w+", etc.) +- Filter files by pattern with the include parameter (eg. 
"*.js", "*.{ts,tsx}") +- Returns file paths with at least one match sorted by modification time +- Use this tool when you need to find files containing specific patterns +- If you need to identify/count the number of matches within files, use the Bash tool with \`rg\` (ripgrep) directly. Do NOT use \`grep\`. +- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Task tool instead` + +export const grepTool = defineTool("grep", { + description: DESCRIPTION, + parameters: z.object({ + pattern: z.string().describe("The regex pattern to search for in file contents"), + path: z + .string() + .optional() + .describe("The directory to search in. Defaults to the current working directory."), + include: z + .string() + .optional() + .describe('File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")'), + }), + async execute(params, ctx) { + if (!params.pattern) { + throw new Error("pattern is required") + } + + const searchPath = params.path || ctx.workingDirectory + + const rawMatches = await searchContent({ + cwd: searchPath, + pattern: params.pattern, + glob: params.include, + }) + + // Add modification time to matches + const matches: Array<{ + path: string + modTime: number + lineNum: number + lineText: string + }> = [] + + for (const match of rawMatches) { + let modTime = 0 + try { + const stats = fs.statSync(match.path) + modTime = stats.mtime.getTime() + } catch { + // File might have been deleted + } + matches.push({ + ...match, + modTime, + }) + } + + // Sort by modification time (newest first) + matches.sort((a, b) => b.modTime - a.modTime) + + const limit = 100 + const truncated = matches.length > limit + const finalMatches = truncated ? 
matches.slice(0, limit) : matches + + if (finalMatches.length === 0) { + return { + title: params.pattern, + metadata: { matches: 0, truncated: false }, + output: "No files found", + } + } + + const outputLines = [`Found ${finalMatches.length} matches`] + + let currentFile = "" + for (const match of finalMatches) { + if (currentFile !== match.path) { + if (currentFile !== "") { + outputLines.push("") + } + currentFile = match.path + outputLines.push(`${match.path}:`) + } + const truncatedLineText = + match.lineText.length > MAX_LINE_LENGTH + ? match.lineText.substring(0, MAX_LINE_LENGTH) + "..." + : match.lineText + outputLines.push(` Line ${match.lineNum}: ${truncatedLineText}`) + } + + if (truncated) { + outputLines.push("") + outputLines.push("(Results are truncated. Consider using a more specific path or pattern.)") + } + + return { + title: params.pattern, + metadata: { + matches: finalMatches.length, + truncated, + }, + output: outputLines.join("\n"), + } + }, +}) diff --git a/packages/kiana-v6/src/tool/index.ts b/packages/kiana-v6/src/tool/index.ts new file mode 100644 index 00000000000..ed66fa39c3f --- /dev/null +++ b/packages/kiana-v6/src/tool/index.ts @@ -0,0 +1,111 @@ +// Tool exports +export { defineTool, type Tool, type ToolContext, type ToolResult } from "./tool.js" + +// Individual tool exports +export { bashTool } from "./bash.js" +export { readTool } from "./read.js" +export { writeTool } from "./write.js" +export { editTool } from "./edit.js" +export { globTool } from "./glob.js" +export { grepTool } from "./grep.js" +export { listTool } from "./list.js" +export { webfetchTool } from "./webfetch.js" +export { websearchTool } from "./websearch.js" +export { codesearchTool } from "./codesearch.js" +export { todoWriteTool, todoReadTool, getTodos, setTodos, type TodoInfo } from "./todo.js" +export { + taskTool, + setSubagentExecutor, + getSubagentExecutor, + getAgentConfig, + getAvailableAgentTypes, + type SubagentExecutor, +} from "./task.js" +export 
{ invalidTool } from "./invalid.js" + +import type { Tool } from "./tool.js" +import { bashTool } from "./bash.js" +import { readTool } from "./read.js" +import { writeTool } from "./write.js" +import { editTool } from "./edit.js" +import { globTool } from "./glob.js" +import { grepTool } from "./grep.js" +import { listTool } from "./list.js" +import { webfetchTool } from "./webfetch.js" +import { websearchTool } from "./websearch.js" +import { codesearchTool } from "./codesearch.js" +import { todoWriteTool, todoReadTool } from "./todo.js" +import { taskTool } from "./task.js" +import { invalidTool } from "./invalid.js" + +// All available tools +export const ALL_TOOLS: Record<string, Tool> = { + bash: bashTool, + read: readTool, + write: writeTool, + edit: editTool, + glob: globTool, + grep: grepTool, + list: listTool, + webfetch: webfetchTool, + websearch: websearchTool, + codesearch: codesearchTool, + todowrite: todoWriteTool, + todoread: todoReadTool, + task: taskTool, + invalid: invalidTool, +} + +// Default tools (excludes invalid which is for error handling) +export const DEFAULT_TOOLS: string[] = [ + "bash", + "read", + "write", + "edit", + "glob", + "grep", + "list", + "webfetch", + "websearch", + "codesearch", + "todowrite", + "todoread", + "task", +] + +// Tool categories for organization +export const TOOL_CATEGORIES = { + filesystem: ["read", "write", "edit", "glob", "grep", "list"], + execution: ["bash"], + web: ["webfetch", "websearch", "codesearch"], + planning: ["todowrite", "todoread"], + subagent: ["task"], + internal: ["invalid"], +} as const + +/** + * Get a subset of tools by name + */ +export function getTools(names: string[]): Record<string, Tool> { + const result: Record<string, Tool> = {} + for (const name of names) { + const tool = ALL_TOOLS[name] + if (tool) { + result[name] = tool + } + } + return result +} + +/** + * Get all tools except specified ones + */ +export function getToolsExcept(exclude: string[]): Record<string, Tool> { + const result: Record<string, Tool> = {} + for (const [name, tool] of 
Object.entries(ALL_TOOLS)) { + if (!exclude.includes(name)) { + result[name] = tool + } + } + return result +} diff --git a/packages/kiana-v6/src/tool/invalid.ts b/packages/kiana-v6/src/tool/invalid.ts new file mode 100644 index 00000000000..2f0329f08c4 --- /dev/null +++ b/packages/kiana-v6/src/tool/invalid.ts @@ -0,0 +1,21 @@ +import { z } from "zod" +import { defineTool } from "./tool.js" + +/** + * Invalid tool - receives redirected parse errors from experimental_repairToolCall. + * This allows the LLM to see the error as a normal tool result and retry with corrected arguments. + */ +export const invalidTool = defineTool("invalid", { + description: "Do not use", + parameters: z.object({ + tool: z.string(), + error: z.string(), + }), + async execute(params) { + return { + title: "Invalid Tool", + output: `The arguments provided to the tool are invalid: ${params.error}`, + metadata: {}, + } + }, +}) diff --git a/packages/kiana-v6/src/tool/list.ts b/packages/kiana-v6/src/tool/list.ts new file mode 100644 index 00000000000..932503ef404 --- /dev/null +++ b/packages/kiana-v6/src/tool/list.ts @@ -0,0 +1,115 @@ +import { z } from "zod" +import * as path from "node:path" +import { defineTool } from "./tool.js" +import { listFiles } from "../util/ripgrep.js" + +const IGNORE_PATTERNS = [ + "node_modules/", + "__pycache__/", + ".git/", + "dist/", + "build/", + "target/", + "vendor/", + "bin/", + "obj/", + ".idea/", + ".vscode/", + ".zig-cache/", + "zig-out", + ".coverage", + "coverage/", + "tmp/", + "temp/", + ".cache/", + "cache/", + "logs/", + ".venv/", + "venv/", + "env/", +] + +const LIMIT = 100 + +const DESCRIPTION = `Lists files and directories in a given path. The path parameter must be absolute; omit it to use the current workspace directory. You can optionally provide an array of glob patterns to ignore with the ignore parameter. 
You should generally prefer the Glob and Grep tools, if you know which directories to search.` + +export const listTool = defineTool("list", { + description: DESCRIPTION, + parameters: z.object({ + path: z + .string() + .describe("The absolute path to the directory to list (must be absolute, not relative)") + .optional(), + ignore: z.array(z.string()).describe("List of glob patterns to ignore").optional(), + }), + async execute(params, ctx) { + const searchPath = path.resolve(ctx.workingDirectory, params.path || ".") + + const ignoreGlobs = IGNORE_PATTERNS.map((p) => `!${p}*`).concat( + params.ignore?.map((p) => `!${p}`) || [] + ) + + const files: string[] = [] + for await (const file of listFiles({ cwd: searchPath, glob: ignoreGlobs })) { + files.push(file) + if (files.length >= LIMIT) break + } + + // Build directory structure + const dirs = new Set<string>() + const filesByDir = new Map<string, string[]>() + + for (const file of files) { + const dir = path.dirname(file) + const parts = dir === "." ? [] : dir.split("/") + + // Add all parent directories + for (let i = 0; i <= parts.length; i++) { + const dirPath = i === 0 ? "." 
: parts.slice(0, i).join("/") + dirs.add(dirPath) + } + + // Add file to its directory + if (!filesByDir.has(dir)) filesByDir.set(dir, []) + filesByDir.get(dir)!.push(path.basename(file)) + } + + function renderDir(dirPath: string, depth: number): string { + const indent = " ".repeat(depth) + let output = "" + + if (depth > 0) { + output += `${indent}${path.basename(dirPath)}/\n` + } + + const childIndent = " ".repeat(depth + 1) + const children = Array.from(dirs) + .filter((d) => path.dirname(d) === dirPath && d !== dirPath) + .sort() + + // Render subdirectories first + for (const child of children) { + output += renderDir(child, depth + 1) + } + + // Render files + const dirFiles = filesByDir.get(dirPath) || [] + for (const file of dirFiles.sort()) { + output += `${childIndent}${file}\n` + } + + return output + } + + const output = `${searchPath}/\n` + renderDir(".", 0) + + return { + title: path.relative(ctx.workingDirectory, searchPath) || ".", + metadata: { + count: files.length, + truncated: files.length >= LIMIT, + }, + output, + } + }, +}) diff --git a/packages/kiana-v6/src/tool/read.ts b/packages/kiana-v6/src/tool/read.ts new file mode 100644 index 00000000000..c838df640e0 --- /dev/null +++ b/packages/kiana-v6/src/tool/read.ts @@ -0,0 +1,259 @@ +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" + +const DEFAULT_READ_LIMIT = 2000 +const MAX_LINE_LENGTH = 2000 + +const DESCRIPTION = `Reads a file from the local filesystem. You can access any file directly by using this tool. +Assume this tool is able to read all files on the machine. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned. 
+ +Usage: +- The filePath parameter must be an absolute path, not a relative path +- By default, it reads up to 2000 lines starting from the beginning of the file +- You can optionally specify a line offset and limit (especially handy for long files), but it's recommended to read the whole file by not providing these parameters +- Any lines longer than 2000 characters will be truncated +- Results are returned using cat -n format, with line numbers starting at 1 +- You have the capability to call multiple tools in a single response. It is always better to speculatively read multiple files as a batch that are potentially useful. +- If you read a file that exists but has empty contents you will receive a system reminder warning in place of file contents. +- You can read image files using this tool.` + +export const readTool = defineTool("read", { + description: DESCRIPTION, + parameters: z.object({ + filePath: z.string().describe("The path to the file to read"), + offset: z.coerce + .number() + .describe("The line number to start reading from (0-based)") + .optional(), + limit: z.coerce + .number() + .describe("The number of lines to read (defaults to 2000)") + .optional(), + }), + async execute(params, ctx) { + let filepath = params.filePath + if (!path.isAbsolute(filepath)) { + filepath = path.join(ctx.workingDirectory, filepath) + } + const title = path.relative(ctx.workingDirectory, filepath) + + // Block .env files (except samples/examples) + const whitelist = [".env.sample", ".example"] + const block = (() => { + if (whitelist.some((w) => filepath.endsWith(w))) return false + if (filepath.includes(".env")) return true + return false + })() + + if (block) { + throw new Error( + `The user has blocked you from reading ${filepath}, DO NOT make further attempts to read it` + ) + } + + // Check file exists + if (!fs.existsSync(filepath)) { + const dir = path.dirname(filepath) + const base = path.basename(filepath) + + try { + const dirEntries = fs.readdirSync(dir) + 
const suggestions = dirEntries + .filter( + (entry) => + entry.toLowerCase().includes(base.toLowerCase()) || + base.toLowerCase().includes(entry.toLowerCase()) + ) + .map((entry) => path.join(dir, entry)) + .slice(0, 3) + + if (suggestions.length > 0) { + throw new Error( + `File not found: ${filepath}\n\nDid you mean one of these?\n${suggestions.join("\n")}` + ) + } + } catch (e) { + if ((e as Error).message.startsWith("File not found:")) throw e + // Directory doesn't exist, fall through + } + + throw new Error(`File not found: ${filepath}`) + } + + const stats = fs.statSync(filepath) + if (stats.isDirectory()) { + throw new Error(`Path is a directory, not a file: ${filepath}`) + } + + // Check if image + const imageType = isImageFile(filepath) + if (imageType) { + const content = fs.readFileSync(filepath) + const mime = getMimeType(filepath) + const msg = "Image read successfully" + return { + title, + output: msg, + metadata: { preview: msg }, + attachments: [ + { + id: `part_${Date.now()}`, + sessionID: ctx.sessionID, + messageID: ctx.messageID, + type: "file" as const, + mime, + url: `data:${mime};base64,${content.toString("base64")}`, + }, + ], + } + } + + // Check if binary + if (isBinaryFile(filepath, stats.size)) { + throw new Error(`Cannot read binary file: ${filepath}`) + } + + // Read text file + const content = fs.readFileSync(filepath, "utf-8") + const lines = content.split("\n") + const limit = params.limit ?? DEFAULT_READ_LIMIT + const offset = params.offset || 0 + + const raw = lines.slice(offset, offset + limit).map((line) => { + return line.length > MAX_LINE_LENGTH + ? line.substring(0, MAX_LINE_LENGTH) + "..." 
+ : line + }) + + // Format with line numbers (matching cat -n format: padded line number + tab) + const numbered = raw.map((line, index) => { + return `${(index + offset + 1).toString().padStart(5, " ")}\t${line}` + }) + + const preview = raw.slice(0, 20).join("\n") + + let output = "\n" + output += numbered.join("\n") + + const totalLines = lines.length + const lastReadLine = offset + raw.length + const hasMoreLines = totalLines > lastReadLine + + if (hasMoreLines) { + output += `\n\n(File has more lines. Use 'offset' parameter to read beyond line ${lastReadLine})` + } else { + output += `\n\n(End of file - total ${totalLines} lines)` + } + output += "\n" + + return { + title, + output, + metadata: { preview }, + } + }, +}) + +function isImageFile(filePath: string): string | false { + const ext = path.extname(filePath).toLowerCase() + switch (ext) { + case ".jpg": + case ".jpeg": + return "JPEG" + case ".png": + return "PNG" + case ".gif": + return "GIF" + case ".bmp": + return "BMP" + case ".webp": + return "WebP" + default: + return false + } +} + +function getMimeType(filePath: string): string { + const ext = path.extname(filePath).toLowerCase() + switch (ext) { + case ".jpg": + case ".jpeg": + return "image/jpeg" + case ".png": + return "image/png" + case ".gif": + return "image/gif" + case ".bmp": + return "image/bmp" + case ".webp": + return "image/webp" + default: + return "application/octet-stream" + } +} + +function isBinaryFile(filepath: string, fileSize: number): boolean { + const ext = path.extname(filepath).toLowerCase() + + // Known binary extensions + const binaryExtensions = new Set([ + ".zip", + ".tar", + ".gz", + ".exe", + ".dll", + ".so", + ".class", + ".jar", + ".war", + ".7z", + ".doc", + ".docx", + ".xls", + ".xlsx", + ".ppt", + ".pptx", + ".odt", + ".ods", + ".odp", + ".bin", + ".dat", + ".obj", + ".o", + ".a", + ".lib", + ".wasm", + ".pyc", + ".pyo", + ]) + + if (binaryExtensions.has(ext)) { + return true + } + + if (fileSize === 0) return 
false + + // Read first 4KB and check for binary content + const bufferSize = Math.min(4096, fileSize) + const buffer = Buffer.alloc(bufferSize) + const fd = fs.openSync(filepath, "r") + const bytesRead = fs.readSync(fd, buffer, 0, bufferSize, 0) + fs.closeSync(fd) + + if (bytesRead === 0) return false + + let nonPrintableCount = 0 + for (let i = 0; i < bytesRead; i++) { + // Null byte is definite binary indicator + if (buffer[i] === 0) return true + // Count non-printable characters (excluding common whitespace) + if (buffer[i] < 9 || (buffer[i] > 13 && buffer[i] < 32)) { + nonPrintableCount++ + } + } + + // If >30% non-printable, consider binary + return nonPrintableCount / bytesRead > 0.3 +} diff --git a/packages/kiana-v6/src/tool/task.ts b/packages/kiana-v6/src/tool/task.ts new file mode 100644 index 00000000000..ee42846c255 --- /dev/null +++ b/packages/kiana-v6/src/tool/task.ts @@ -0,0 +1,176 @@ +import { z } from "zod" +import { defineTool, ToolContext } from "./tool.js" + +const DESCRIPTION = `Launch a new agent to handle complex, multi-step tasks autonomously. + +Available agent types: +- general-purpose: General-purpose agent for researching complex questions, searching for code, and executing multi-step tasks. +- explore: Fast agent specialized for exploring codebases. Use this when you need to quickly find files by patterns, search code for keywords, or answer questions about the codebase. + +When using the Task tool, you must specify a subagent_type parameter to select which agent type to use. 
+ +When to use the Task tool: +- When you are instructed to execute custom slash commands +- When exploring large codebases +- When tasks require multiple rounds of searching and analysis + +When NOT to use the Task tool: +- If you want to read a specific file path, use the Read or Glob tool instead +- If you are searching for a specific class definition, use the Glob tool instead +- If you are searching for code within a specific file or set of 2-3 files, use the Read tool instead + +Usage notes: +1. Launch multiple agents concurrently whenever possible for maximum performance +2. When the agent is done, it will return a single message back to you +3. Each agent invocation is stateless - provide a highly detailed task description +4. The agent's outputs should generally be trusted +5. Clearly tell the agent whether you expect it to write code or just to do research` + +// Agent configuration for subagents +interface AgentConfig { + name: string + description: string + systemPrompt: string +} + +const AGENTS: Record = { + "general-purpose": { + name: "general-purpose", + description: "General-purpose agent for researching complex questions and executing multi-step tasks", + systemPrompt: `You are a helpful coding assistant running as a subagent. Complete the task autonomously and return a concise summary of your findings or actions. + +When searching for code or files: +- Use Glob to find files by patterns +- Use Grep to search file contents +- Use Read to examine specific files + +Be thorough but efficient. Return only the essential information needed to answer the question or complete the task.`, + }, + explore: { + name: "explore", + description: "Fast agent specialized for exploring codebases", + systemPrompt: `You are a fast code exploration agent. Your job is to quickly find files and code patterns in the codebase. 
+ +Use these tools efficiently: +- Glob: Find files by pattern (e.g., "**/*.ts", "src/**/*.tsx") +- Grep: Search file contents for keywords or patterns +- Read: Examine specific files when needed + +Be quick and focused. Return a concise summary of what you found.`, + }, +} + +// Subagent session executor type - will be injected by the session manager +export type SubagentExecutor = (params: { + sessionID: string + parentSessionID: string + prompt: string + agentConfig: AgentConfig + agentType: string + tools: Record + depth: number // nesting level (1 = direct subagent, 2 = subagent of subagent) +}) => Promise<{ + output: string + sessionID: string +}> + +// This will be set by the session manager +let subagentExecutor: SubagentExecutor | null = null + +export function setSubagentExecutor(executor: SubagentExecutor) { + subagentExecutor = executor +} + +export function getSubagentExecutor(): SubagentExecutor | null { + return subagentExecutor +} + +export function getAgentConfig(agentType: string): AgentConfig | undefined { + return AGENTS[agentType] +} + +export function getAvailableAgentTypes(): string[] { + return Object.keys(AGENTS) +} + +export const taskTool = defineTool("task", { + description: DESCRIPTION, + parameters: z.object({ + description: z.string().describe("A short (3-5 words) description of the task"), + prompt: z.string().describe("The task for the agent to perform"), + subagent_type: z.string().describe("The type of specialized agent to use for this task"), + session_id: z.string().describe("Existing Task session to continue").optional(), + }), + async execute(params, ctx) { + const agent = AGENTS[params.subagent_type] + if (!agent) { + const availableAgents = Object.keys(AGENTS).join(", ") + throw new Error( + `Unknown agent type: ${params.subagent_type}. 
Available agents: ${availableAgents}` + ) + } + + // Update metadata to show task in progress + ctx.metadata({ + title: params.description, + metadata: { + agentType: params.subagent_type, + status: "running", + }, + }) + + if (!subagentExecutor) { + // Fallback: Return a message indicating subagent execution is not available + // This happens when the task tool is used without a proper session manager + return { + title: params.description, + metadata: { + agentType: params.subagent_type, + status: "skipped", + }, + output: `Subagent execution is not available in this context. Task: ${params.prompt}`, + } + } + + try { + const result = await subagentExecutor({ + sessionID: params.session_id || `task-${Date.now()}`, + parentSessionID: ctx.sessionID, + prompt: params.prompt, + agentConfig: agent, + agentType: params.subagent_type, + tools: { + todowrite: false, + todoread: false, + task: false, // Prevent recursive task calls + }, + depth: 1, // Direct subagent call from parent + }) + + const output = + result.output + + "\n\n" + + ["", `session_id: ${result.sessionID}`, ""].join("\n") + + return { + title: params.description, + metadata: { + sessionId: result.sessionID, + status: "completed", + }, + output, + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error) + return { + title: params.description, + metadata: { + agentType: params.subagent_type, + status: "error", + error: errorMessage, + }, + output: `Task failed: ${errorMessage}`, + } + } + }, +}) diff --git a/packages/kiana-v6/src/tool/todo.ts b/packages/kiana-v6/src/tool/todo.ts new file mode 100644 index 00000000000..649b36c17c3 --- /dev/null +++ b/packages/kiana-v6/src/tool/todo.ts @@ -0,0 +1,109 @@ +import { z } from "zod" +import { defineTool } from "./tool.js" + +const DESCRIPTION_WRITE = `Use this tool to create and manage a structured task list for your current coding session. 
This helps you track progress, organize complex tasks, and demonstrate thoroughness to the user. +It also helps the user understand the progress of the task and overall progress of their requests. + +## When to Use This Tool +Use this tool proactively in these scenarios: + +1. Complex multi-step tasks - When a task requires 3 or more distinct steps or actions +2. Non-trivial and complex tasks - Tasks that require careful planning or multiple operations +3. User explicitly requests todo list - When the user directly asks you to use the todo list +4. User provides multiple tasks - When users provide a list of things to be done (numbered or comma-separated) +5. After receiving new instructions - Immediately capture user requirements as todos. Feel free to edit the todo list based on new information. +6. After completing a task - Mark it complete and add any new follow-up tasks +7. When you start working on a new task, mark the todo as in_progress. Ideally you should only have one todo as in_progress at a time. Complete existing tasks before starting new ones. + +## When NOT to Use This Tool + +Skip using this tool when: +1. There is only a single, straightforward task +2. The task is trivial and tracking it provides no organizational benefit +3. The task can be completed in less than 3 trivial steps +4. The task is purely conversational or informational + +NOTE that you should not use this tool if there is only one trivial task to do. In this case you are better off just doing the task directly. + +## Task States and Management + +1. **Task States**: Use these states to track progress: + - pending: Task not yet started + - in_progress: Currently working on (limit to ONE task at a time) + - completed: Task finished successfully + +2. 
**Task Management**: + - Update task status in real-time as you work + - Mark tasks complete IMMEDIATELY after finishing (don't batch completions) + - Only have ONE task in_progress at any time + - Complete current tasks before starting new ones + +3. **Task Breakdown**: + - Create specific, actionable items + - Break complex tasks into smaller, manageable steps + - Use clear, descriptive task names + +When in doubt, use this tool. Being proactive with task management demonstrates attentiveness and ensures you complete all requirements successfully.` + +const DESCRIPTION_READ = `Use this tool to read your current todo list` + +// Schema for todo items +const TodoInfoSchema = z.object({ + content: z.string().describe("The task description"), + status: z + .enum(["pending", "in_progress", "completed"]) + .describe("The task status"), + activeForm: z + .string() + .describe("The present continuous form of the task (e.g., 'Implementing feature')"), +}) + +export type TodoInfo = z.infer + +// In-memory todo storage per session +const todoStorage = new Map() + +export function getTodos(sessionID: string): TodoInfo[] { + return todoStorage.get(sessionID) || [] +} + +export function setTodos(sessionID: string, todos: TodoInfo[]): void { + todoStorage.set(sessionID, todos) +} + +export const todoWriteTool = defineTool("todowrite", { + description: DESCRIPTION_WRITE, + parameters: z.object({ + todos: z.array(TodoInfoSchema).describe("The updated todo list"), + }), + async execute(params, ctx) { + setTodos(ctx.sessionID, params.todos) + + const pendingCount = params.todos.filter((x) => x.status !== "completed").length + + return { + title: `${pendingCount} todos`, + output: JSON.stringify(params.todos, null, 2), + metadata: { + todos: params.todos, + }, + } + }, +}) + +export const todoReadTool = defineTool("todoread", { + description: DESCRIPTION_READ, + parameters: z.object({}), + async execute(_params, ctx) { + const todos = getTodos(ctx.sessionID) + const pendingCount = 
todos.filter((x) => x.status !== "completed").length + + return { + title: `${pendingCount} todos`, + metadata: { + todos, + }, + output: JSON.stringify(todos, null, 2), + } + }, +}) diff --git a/packages/kiana-v6/src/tool/tool.ts b/packages/kiana-v6/src/tool/tool.ts new file mode 100644 index 00000000000..a184fec2b26 --- /dev/null +++ b/packages/kiana-v6/src/tool/tool.ts @@ -0,0 +1,77 @@ +import { z } from "zod" +import type { FilePart } from "../types/part.js" + +/** + * Tool execution context + */ +export interface ToolContext { + sessionID: string + messageID: string + abort: AbortSignal + callID?: string + workingDirectory: string + metadata(input: { title?: string; metadata?: Record }): void +} + +/** + * Tool execution result + */ +export interface ToolResult { + title: string + output: string + metadata: Record + attachments?: FilePart[] +} + +/** + * Tool definition + */ +export interface Tool
<P extends z.ZodType = z.ZodType> {
+  id: string
+  description: string
+  parameters: P
+  execute(args: z.infer<P>, ctx: ToolContext): Promise<ToolResult>
+}
+
+/**
+ * Define a tool with validation
+ */
+export function defineTool<P extends z.ZodType>(
+  id: string,
+  config: {
+    description: string
+    parameters: P
+    execute(args: z.infer<P>, ctx: ToolContext): Promise<ToolResult>
+  }
+): Tool<P>
{ + return { + id, + description: config.description, + parameters: config.parameters, + execute: async (args, ctx) => { + // TODO: Temporarily disabled to test if AI SDK v6 handles this + // // Normalize args: some models erroneously stringify tool inputs; attempt to JSON.parse when string + // let normalizedArgs: unknown = args + // if (typeof args === "string") { + // try { + // normalizedArgs = JSON.parse(args) + // } catch { + // // Fall through to validation error; safeParse will surface a clear message + // normalizedArgs = args + // } + // } + + // Validate parameters + const result = config.parameters.safeParse(args) + if (!result.success) { + const errors = result.error.issues + .map((e: z.ZodIssue) => `${e.path.join(".")}: ${e.message}`) + .join(", ") + throw new Error( + `The ${id} tool was called with invalid arguments: ${errors}. Please fix the input.` + ) + } + return config.execute(result.data, ctx) + }, + } +} diff --git a/packages/kiana-v6/src/tool/webfetch.ts b/packages/kiana-v6/src/tool/webfetch.ts new file mode 100644 index 00000000000..00b7a57a206 --- /dev/null +++ b/packages/kiana-v6/src/tool/webfetch.ts @@ -0,0 +1,242 @@ +import { z } from "zod" +import { defineTool } from "./tool.js" + +const MAX_RESPONSE_SIZE = 5 * 1024 * 1024 // 5MB +const DEFAULT_TIMEOUT = 30 * 1000 // 30 seconds +const MAX_TIMEOUT = 120 * 1000 // 2 minutes + +const DESCRIPTION = `- Fetches content from a specified URL +- Takes a URL and a format as input +- Fetches the URL content, converts HTML to markdown +- Returns the content in the specified format +- Use this tool when you need to retrieve and analyze web content + +Usage notes: + - IMPORTANT: if another tool is present that offers better web fetching capabilities, is more targeted to the task, or has fewer restrictions, prefer using that tool instead of this one. 
+ - The URL must be a fully-formed valid URL + - HTTP URLs will be automatically upgraded to HTTPS + - This tool is read-only and does not modify any files + - Results may be summarized if the content is very large + - Includes a self-cleaning 15-minute cache for faster responses when repeatedly accessing the same URL` + +// Simple cache for responses +const cache = new Map() +const CACHE_TTL = 15 * 60 * 1000 // 15 minutes + +function cleanCache() { + const now = Date.now() + for (const [key, value] of cache) { + if (now - value.timestamp > CACHE_TTL) { + cache.delete(key) + } + } +} + +export const webfetchTool = defineTool("webfetch", { + description: DESCRIPTION, + parameters: z.object({ + url: z.string().describe("The URL to fetch content from"), + format: z + .enum(["text", "markdown", "html"]) + .describe("The format to return the content in (text, markdown, or html)"), + timeout: z.number().describe("Optional timeout in seconds (max 120)").optional(), + }), + async execute(params, ctx) { + // Validate URL + if (!params.url.startsWith("http://") && !params.url.startsWith("https://")) { + throw new Error("URL must start with http:// or https://") + } + + // Check cache + cleanCache() + const cacheKey = `${params.url}:${params.format}` + const cached = cache.get(cacheKey) + if (cached) { + const contentType = "text/html" // Cached, assume HTML + return { + output: cached.content, + title: `${params.url} (cached)`, + metadata: {}, + } + } + + const timeout = Math.min((params.timeout ?? 
DEFAULT_TIMEOUT / 1000) * 1000, MAX_TIMEOUT) + + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), timeout) + + // Build Accept header based on requested format + let acceptHeader = "*/*" + switch (params.format) { + case "markdown": + acceptHeader = "text/markdown;q=1.0, text/x-markdown;q=0.9, text/plain;q=0.8, text/html;q=0.7, */*;q=0.1" + break + case "text": + acceptHeader = "text/plain;q=1.0, text/markdown;q=0.9, text/html;q=0.8, */*;q=0.1" + break + case "html": + acceptHeader = "text/html;q=1.0, application/xhtml+xml;q=0.9, text/plain;q=0.8, text/markdown;q=0.7, */*;q=0.1" + break + default: + acceptHeader = + "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8" + } + + let response: Response + try { + response = await fetch(params.url, { + signal: controller.signal, + headers: { + "User-Agent": + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", + Accept: acceptHeader, + "Accept-Language": "en-US,en;q=0.9", + }, + }) + } finally { + clearTimeout(timeoutId) + } + + if (!response.ok) { + throw new Error(`Request failed with status code: ${response.status}`) + } + + // Check content length + const contentLength = response.headers.get("content-length") + if (contentLength && parseInt(contentLength) > MAX_RESPONSE_SIZE) { + throw new Error("Response too large (exceeds 5MB limit)") + } + + const arrayBuffer = await response.arrayBuffer() + if (arrayBuffer.byteLength > MAX_RESPONSE_SIZE) { + throw new Error("Response too large (exceeds 5MB limit)") + } + + const content = new TextDecoder().decode(arrayBuffer) + const contentType = response.headers.get("content-type") || "" + + const title = `${params.url} (${contentType})` + + let output: string + + // Handle content based on requested format and actual content type + switch (params.format) { + case "markdown": + if (contentType.includes("text/html")) { + 
output = convertHTMLToMarkdown(content) + } else { + output = content + } + break + + case "text": + if (contentType.includes("text/html")) { + output = extractTextFromHTML(content) + } else { + output = content + } + break + + case "html": + default: + output = content + break + } + + // Cache the result + cache.set(cacheKey, { content: output, timestamp: Date.now() }) + + return { + output, + title, + metadata: {}, + } + }, +}) + +function extractTextFromHTML(html: string): string { + // Simple HTML to text conversion + let text = html + // Remove script and style tags with their content + .replace(/]*>[\s\S]*?<\/script>/gi, "") + .replace(/]*>[\s\S]*?<\/style>/gi, "") + .replace(/]*>[\s\S]*?<\/noscript>/gi, "") + // Replace common block elements with newlines + .replace(/<\/?(p|div|br|h[1-6]|li|tr)[^>]*>/gi, "\n") + // Remove all remaining tags + .replace(/<[^>]+>/g, "") + // Decode HTML entities + .replace(/ /g, " ") + .replace(/&/g, "&") + .replace(/</g, "<") + .replace(/>/g, ">") + .replace(/"/g, '"') + .replace(/&#(\d+);/g, (_, num) => String.fromCharCode(parseInt(num))) + // Clean up whitespace + .replace(/\n\s*\n/g, "\n\n") + .trim() + + return text +} + +function convertHTMLToMarkdown(html: string): string { + let md = html + // Remove script and style tags + .replace(/]*>[\s\S]*?<\/script>/gi, "") + .replace(/]*>[\s\S]*?<\/style>/gi, "") + .replace(/]*>[\s\S]*?<\/noscript>/gi, "") + + // Convert headings + md = md.replace(/]*>([\s\S]*?)<\/h1>/gi, "\n# $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h2>/gi, "\n## $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h3>/gi, "\n### $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h4>/gi, "\n#### $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h5>/gi, "\n##### $1\n") + md = md.replace(/]*>([\s\S]*?)<\/h6>/gi, "\n###### $1\n") + + // Convert links + md = md.replace(/]+href="([^"]*)"[^>]*>([\s\S]*?)<\/a>/gi, "[$2]($1)") + + // Convert images + md = md.replace(/]+src="([^"]*)"[^>]*alt="([^"]*)"[^>]*>/gi, "![$2]($1)") + md = 
md.replace(/]+src="([^"]*)"[^>]*>/gi, "![]($1)") + + // Convert bold and italic + md = md.replace(/<(strong|b)[^>]*>([\s\S]*?)<\/(strong|b)>/gi, "**$2**") + md = md.replace(/<(em|i)[^>]*>([\s\S]*?)<\/(em|i)>/gi, "*$2*") + + // Convert code + md = md.replace(/]*>([\s\S]*?)<\/code>/gi, "`$1`") + md = md.replace(/]*>([\s\S]*?)<\/pre>/gi, "\n```\n$1\n```\n") + + // Convert lists + md = md.replace(/]*>([\s\S]*?)<\/li>/gi, "\n- $1") + md = md.replace(/<\/?[ou]l[^>]*>/gi, "\n") + + // Convert paragraphs and divs + md = md.replace(/<\/p>/gi, "\n\n") + md = md.replace(/]*>/gi, "") + md = md.replace(//gi, "\n") + md = md.replace(/<\/div>/gi, "\n") + md = md.replace(/]*>/gi, "") + + // Convert horizontal rules + md = md.replace(/]*>/gi, "\n---\n") + + // Remove remaining tags + md = md.replace(/<[^>]+>/g, "") + + // Decode HTML entities + md = md + .replace(/ /g, " ") + .replace(/&/g, "&") + .replace(/</g, "<") + .replace(/>/g, ">") + .replace(/"/g, '"') + .replace(/&#(\d+);/g, (_, num) => String.fromCharCode(parseInt(num))) + + // Clean up whitespace + md = md.replace(/\n{3,}/g, "\n\n").trim() + + return md +} diff --git a/packages/kiana-v6/src/tool/websearch.ts b/packages/kiana-v6/src/tool/websearch.ts new file mode 100644 index 00000000000..d6fc010d99e --- /dev/null +++ b/packages/kiana-v6/src/tool/websearch.ts @@ -0,0 +1,145 @@ +import { z } from "zod" +import { defineTool } from "./tool.js" + +const DESCRIPTION = `- Search the web using Exa AI - performs real-time web searches and can scrape content from specific URLs +- Provides up-to-date information for current events and recent data +- Supports configurable result counts and returns the content from the most relevant websites +- Use this tool for accessing information beyond knowledge cutoff +- Searches are performed automatically within a single API call + +Usage notes: + - Supports live crawling modes: 'fallback' (backup if cached unavailable) or 'preferred' (prioritize live crawling) + - Search types: 'auto' 
(balanced), 'fast' (quick results), 'deep' (comprehensive search) + - Configurable context length for optimal LLM integration + - Domain filtering and advanced search options available` + +const API_CONFIG = { + BASE_URL: "https://mcp.exa.ai", + ENDPOINTS: { + SEARCH: "/mcp", + }, + DEFAULT_NUM_RESULTS: 8, +} as const + +interface McpSearchRequest { + jsonrpc: string + id: number + method: string + params: { + name: string + arguments: { + query: string + numResults?: number + livecrawl?: "fallback" | "preferred" + type?: "auto" | "fast" | "deep" + contextMaxCharacters?: number + } + } +} + +interface McpSearchResponse { + jsonrpc: string + result: { + content: Array<{ + type: string + text: string + }> + } +} + +export const websearchTool = defineTool("websearch", { + description: DESCRIPTION, + parameters: z.object({ + query: z.string().describe("Websearch query"), + numResults: z.number().optional().describe("Number of search results to return (default: 8)"), + livecrawl: z + .enum(["fallback", "preferred"]) + .optional() + .describe( + "Live crawl mode - 'fallback': use live crawling as backup if cached content unavailable, 'preferred': prioritize live crawling (default: 'fallback')" + ), + type: z + .enum(["auto", "fast", "deep"]) + .optional() + .describe( + "Search type - 'auto': balanced search (default), 'fast': quick results, 'deep': comprehensive search" + ), + contextMaxCharacters: z + .number() + .optional() + .describe("Maximum characters for context string optimized for LLMs (default: 10000)"), + }), + async execute(params, ctx) { + const searchRequest: McpSearchRequest = { + jsonrpc: "2.0", + id: 1, + method: "tools/call", + params: { + name: "web_search_exa", + arguments: { + query: params.query, + type: params.type || "auto", + numResults: params.numResults || API_CONFIG.DEFAULT_NUM_RESULTS, + livecrawl: params.livecrawl || "fallback", + contextMaxCharacters: params.contextMaxCharacters, + }, + }, + } + + const controller = new AbortController() + 
const timeoutId = setTimeout(() => controller.abort(), 25000) + + try { + const headers: Record = { + accept: "application/json, text/event-stream", + "content-type": "application/json", + } + + const signals = ctx.abort ? [controller.signal, ctx.abort] : [controller.signal] + const response = await fetch(`${API_CONFIG.BASE_URL}${API_CONFIG.ENDPOINTS.SEARCH}`, { + method: "POST", + headers, + body: JSON.stringify(searchRequest), + signal: AbortSignal.any(signals), + }) + + clearTimeout(timeoutId) + + if (!response.ok) { + const errorText = await response.text() + throw new Error(`Search error (${response.status}): ${errorText}`) + } + + const responseText = await response.text() + + // Parse SSE response + const lines = responseText.split("\n") + for (const line of lines) { + if (line.startsWith("data: ")) { + const data: McpSearchResponse = JSON.parse(line.substring(6)) + if (data.result && data.result.content && data.result.content.length > 0) { + return { + output: data.result.content[0].text, + title: `Web search: ${params.query}`, + metadata: {}, + } + } + } + } + + return { + output: "No search results found. Please try a different query.", + title: `Web search: ${params.query}`, + metadata: {}, + } + } catch (error) { + clearTimeout(timeoutId) + + if (error instanceof Error && error.name === "AbortError") { + throw new Error("Search request timed out") + } + + throw error + } + }, +}) diff --git a/packages/kiana-v6/src/tool/write.ts b/packages/kiana-v6/src/tool/write.ts new file mode 100644 index 00000000000..253e4c39c59 --- /dev/null +++ b/packages/kiana-v6/src/tool/write.ts @@ -0,0 +1,53 @@ +import { z } from "zod" +import * as fs from "node:fs" +import * as path from "node:path" +import { defineTool } from "./tool.js" + +const DESCRIPTION = `Writes a file to the local filesystem. + +Usage: +- This tool will overwrite the existing file if there is one at the provided path. 
+- If this is an existing file, you MUST use the Read tool first to read the file's contents. This tool will fail if you did not read the file first. +- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required. +- NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User. +- Only use emojis if the user explicitly requests it. Avoid writing emojis to files unless asked.` + +export const writeTool = defineTool("write", { + description: DESCRIPTION, + parameters: z.object({ + content: z.string().describe("The content to write to the file"), + filePath: z + .string() + .describe( + "The absolute path to the file to write (must be absolute, not relative)" + ), + }), + async execute(params, ctx) { + const filepath = path.isAbsolute(params.filePath) + ? params.filePath + : path.join(ctx.workingDirectory, params.filePath) + + const exists = fs.existsSync(filepath) + + // Ensure parent directory exists + const dir = path.dirname(filepath) + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }) + } + + // Write file + fs.writeFileSync(filepath, params.content, "utf-8") + + const title = path.relative(ctx.workingDirectory, filepath) + + return { + title, + metadata: { + diagnostics: {}, + filepath, + exists, + }, + output: "", + } + }, +}) diff --git a/packages/kiana-v6/src/types/event.ts b/packages/kiana-v6/src/types/event.ts new file mode 100644 index 00000000000..cc1cd11cc1e --- /dev/null +++ b/packages/kiana-v6/src/types/event.ts @@ -0,0 +1,184 @@ +import { z } from "zod" +import { SessionInfoSchema, FileDiffSchema } from "./session.js" +import { MessageInfoSchema } from "./message.js" +import { PartSchema } from "./part.js" + +// Subagent context - added to events from subagent sessions +export const SubagentContextSchema = z.object({ + parentSessionID: z.string(), + depth: z.number(), // 1 = direct subagent, 2 = subagent of subagent, 
etc. + agentType: z.string().optional(), // e.g., "Explore", "Plan", etc. +}) + +export type SubagentContext = z.infer + +// Session events +export const SessionCreatedEventSchema = z.object({ + type: z.literal("session.created"), + properties: z.object({ + session: SessionInfoSchema, + }), +}) + +export const SessionUpdatedEventSchema = z.object({ + type: z.literal("session.updated"), + properties: z.object({ + session: SessionInfoSchema, + }), +}) + +export const SessionDeletedEventSchema = z.object({ + type: z.literal("session.deleted"), + properties: z.object({ + session: SessionInfoSchema, + }), +}) + +export const SessionIdleEventSchema = z.object({ + type: z.literal("session.idle"), + properties: z.object({ + sessionID: z.string(), + }), +}) + +export const SessionCompactedEventSchema = z.object({ + type: z.literal("session.compacted"), + properties: z.object({ + sessionID: z.string(), + }), +}) + +export const SessionDiffEventSchema = z.object({ + type: z.literal("session.diff"), + properties: z.object({ + sessionID: z.string(), + diff: z.array(FileDiffSchema), + }), +}) + +export const SessionStatusEventSchema = z.object({ + type: z.literal("session.status"), + properties: z.object({ + sessionID: z.string(), + status: z.object({ + state: z.enum(["idle", "running", "error"]), + message: z.string().optional(), + }), + }), +}) + +// Message events +export const MessageCreatedEventSchema = z.object({ + type: z.literal("message.created"), + properties: z.object({ + message: MessageInfoSchema, + }), +}) + +export const MessageUpdatedEventSchema = z.object({ + type: z.literal("message.updated"), + properties: z.object({ + message: MessageInfoSchema, + }), +}) + +export const MessageRemovedEventSchema = z.object({ + type: z.literal("message.removed"), + properties: z.object({ + sessionID: z.string(), + messageID: z.string(), + }), +}) + +export const MessagePartUpdatedEventSchema = z.object({ + type: z.literal("message.part.updated"), + properties: z.object({ + 
part: PartSchema, + delta: z.string().optional(), + }), +}) + +export const MessagePartRemovedEventSchema = z.object({ + type: z.literal("message.part.removed"), + properties: z.object({ + sessionID: z.string(), + messageID: z.string(), + partID: z.string(), + }), +}) + +// Todo events +export const TodoUpdatedEventSchema = z.object({ + type: z.literal("todo.updated"), + properties: z.object({ + sessionID: z.string(), + todos: z.array( + z.object({ + content: z.string(), + status: z.enum(["pending", "in_progress", "completed"]), + activeForm: z.string(), + }) + ), + }), +}) + +// Base event schema (without context) +const BaseEventSchema = z.discriminatedUnion("type", [ + SessionCreatedEventSchema, + SessionUpdatedEventSchema, + SessionDeletedEventSchema, + SessionIdleEventSchema, + SessionCompactedEventSchema, + SessionDiffEventSchema, + SessionStatusEventSchema, + MessageCreatedEventSchema, + MessageUpdatedEventSchema, + MessageRemovedEventSchema, + MessagePartUpdatedEventSchema, + MessagePartRemovedEventSchema, + TodoUpdatedEventSchema, +]) + +// Union of all events with optional subagent context +export const EventSchema = BaseEventSchema.and( + z.object({ + context: SubagentContextSchema.optional(), + }) +) + +export type Event = z.infer + +// Event type enum for convenience +export type EventType = Event["type"] + +// Helper to create typed events +export function createEvent( + type: T, + properties: Extract["properties"] +): Extract { + return { type, properties } as Extract +} + +// Category types for export +export type SessionEvent = z.infer< + | typeof SessionCreatedEventSchema + | typeof SessionUpdatedEventSchema + | typeof SessionDeletedEventSchema + | typeof SessionIdleEventSchema + | typeof SessionCompactedEventSchema + | typeof SessionDiffEventSchema + | typeof SessionStatusEventSchema +> + +export type MessageEvent = z.infer< + | typeof MessageCreatedEventSchema + | typeof MessageUpdatedEventSchema + | typeof MessageRemovedEventSchema +> + +export 
type PartEvent = z.infer< + | typeof MessagePartUpdatedEventSchema + | typeof MessagePartRemovedEventSchema +> + +export type TodoEvent = z.infer diff --git a/packages/kiana-v6/src/types/index.ts b/packages/kiana-v6/src/types/index.ts new file mode 100644 index 00000000000..0b2262ea1b7 --- /dev/null +++ b/packages/kiana-v6/src/types/index.ts @@ -0,0 +1,5 @@ +// Re-export all types +export * from "./session.js" +export * from "./message.js" +export * from "./part.js" +export * from "./event.js" diff --git a/packages/kiana-v6/src/types/message.ts b/packages/kiana-v6/src/types/message.ts new file mode 100644 index 00000000000..be89ddd5ed6 --- /dev/null +++ b/packages/kiana-v6/src/types/message.ts @@ -0,0 +1,96 @@ +import { z } from "zod" +import { FileDiffSchema } from "./session.js" + +export const ModelRefSchema = z.object({ + providerID: z.string(), + modelID: z.string(), +}) + +export type ModelRef = z.infer + +export const MessageTimeSchema = z.object({ + created: z.number(), + completed: z.number().optional(), +}) + +export type MessageTime = z.infer + +export const MessagePathSchema = z.object({ + cwd: z.string(), + root: z.string(), +}) + +export type MessagePath = z.infer + +export const CacheUsageSchema = z.object({ + read: z.number(), + write: z.number(), +}) + +export type CacheUsage = z.infer + +export const TokenUsageSchema = z.object({ + input: z.number(), + output: z.number(), + reasoning: z.number(), + cache: CacheUsageSchema, +}) + +export type TokenUsage = z.infer + +export const MessageErrorDataSchema = z.object({ + message: z.string(), + providerID: z.string().optional(), + status: z.number().optional(), + retryable: z.boolean().optional(), +}) + +export type MessageErrorData = z.infer + +export const MessageErrorSchema = z.object({ + name: z.enum([ + "UnknownError", + "ProviderAuthError", + "MessageOutputLengthError", + "MessageAbortedError", + "APIError", + ]), + data: MessageErrorDataSchema, +}) + +export type MessageError = z.infer + +export 
const UserMessageSummarySchema = z.object({ + title: z.string().optional(), + body: z.string().optional(), + diffs: z.array(FileDiffSchema).optional(), +}) + +export type UserMessageSummary = z.infer + +export const MessageInfoSchema = z.object({ + id: z.string(), + sessionID: z.string(), + role: z.enum(["user", "assistant"]), + time: MessageTimeSchema, + + // User fields + agent: z.string().optional(), + model: ModelRefSchema.optional(), + system: z.string().optional(), + tools: z.record(z.string(), z.boolean()).optional(), + summary: UserMessageSummarySchema.optional(), + + // Assistant fields + parentID: z.string().optional(), + modelID: z.string().optional(), + providerID: z.string().optional(), + mode: z.string().optional(), + path: MessagePathSchema.optional(), + cost: z.number().optional(), + tokens: TokenUsageSchema.optional(), + finish: z.string().optional(), + error: MessageErrorSchema.optional(), +}) + +export type MessageInfo = z.infer diff --git a/packages/kiana-v6/src/types/part.ts b/packages/kiana-v6/src/types/part.ts new file mode 100644 index 00000000000..3af3f13ea89 --- /dev/null +++ b/packages/kiana-v6/src/types/part.ts @@ -0,0 +1,229 @@ +import { z } from "zod" +import { TokenUsageSchema } from "./message.js" + +// Common time structures +export const PartTimeSchema = z.object({ + start: z.number().optional(), + end: z.number().optional(), +}) + +export type PartTime = z.infer + +export const ToolTimeSchema = z.object({ + start: z.number(), + end: z.number().optional(), + compacted: z.number().optional(), +}) + +export type ToolTime = z.infer + +// Base part fields +const BasePartSchema = z.object({ + id: z.string(), + sessionID: z.string(), + messageID: z.string(), +}) + +// Text part +export const TextPartSchema = BasePartSchema.extend({ + type: z.literal("text"), + text: z.string(), + synthetic: z.boolean().optional(), + ignored: z.boolean().optional(), + time: PartTimeSchema.optional(), + metadata: z.record(z.string(), 
z.unknown()).optional(), +}) + +export type TextPart = z.infer + +// Reasoning part +export const ReasoningPartSchema = BasePartSchema.extend({ + type: z.literal("reasoning"), + text: z.string(), + time: PartTimeSchema.optional(), + metadata: z.record(z.string(), z.unknown()).optional(), +}) + +export type ReasoningPart = z.infer + +// Tool state variants +export const ToolStatePendingSchema = z.object({ + status: z.literal("pending"), + input: z.record(z.string(), z.unknown()), + raw: z.string(), +}) + +export const ToolStateRunningSchema = z.object({ + status: z.literal("running"), + input: z.record(z.string(), z.unknown()), + title: z.string().optional(), + metadata: z.record(z.string(), z.unknown()).optional(), + time: z.object({ start: z.number() }), +}) + +export const ToolStateCompletedSchema = z.object({ + status: z.literal("completed"), + input: z.record(z.string(), z.unknown()), + output: z.string(), + title: z.string(), + metadata: z.record(z.string(), z.unknown()), + time: z.object({ + start: z.number(), + end: z.number(), + compacted: z.number().optional(), + }), + attachments: z.array(z.lazy(() => FilePartSchema)).optional(), +}) + +export const ToolStateErrorSchema = z.object({ + status: z.literal("error"), + input: z.record(z.string(), z.unknown()), + error: z.string(), + metadata: z.record(z.string(), z.unknown()).optional(), + time: z.object({ + start: z.number(), + end: z.number(), + }), +}) + +export const ToolStateSchema = z.discriminatedUnion("status", [ + ToolStatePendingSchema, + ToolStateRunningSchema, + ToolStateCompletedSchema, + ToolStateErrorSchema, +]) + +export type ToolState = z.infer + +// Tool part +export const ToolPartSchema = BasePartSchema.extend({ + type: z.literal("tool"), + callID: z.string(), + tool: z.string(), + state: ToolStateSchema, + metadata: z.record(z.string(), z.unknown()).optional(), +}) + +export type ToolPart = z.infer + +// File part +export const FilePartSchema = BasePartSchema.extend({ + type: 
z.literal("file"), + mime: z.string(), + filename: z.string().optional(), + url: z.string(), + source: z + .object({ + type: z.string(), + url: z.string().optional(), + }) + .optional(), +}) + +export type FilePart = z.infer + +// Step start part +export const StepStartPartSchema = BasePartSchema.extend({ + type: z.literal("step-start"), + snapshot: z.string().optional(), +}) + +export type StepStartPart = z.infer + +// Step finish part +export const StepFinishPartSchema = BasePartSchema.extend({ + type: z.literal("step-finish"), + reason: z.string(), + snapshot: z.string().optional(), + cost: z.number(), + tokens: TokenUsageSchema.optional(), +}) + +export type StepFinishPart = z.infer + +// Snapshot part +export const SnapshotPartSchema = BasePartSchema.extend({ + type: z.literal("snapshot"), + snapshot: z.string(), +}) + +export type SnapshotPart = z.infer + +// Patch part +export const PatchPartSchema = BasePartSchema.extend({ + type: z.literal("patch"), + hash: z.string(), + files: z.array(z.string()), +}) + +export type PatchPart = z.infer + +// Agent part +export const AgentPartSchema = BasePartSchema.extend({ + type: z.literal("agent"), + name: z.string(), + source: z + .object({ + value: z.string(), + start: z.number(), + end: z.number(), + }) + .optional(), +}) + +export type AgentPart = z.infer + +// Retry part +export const RetryPartSchema = BasePartSchema.extend({ + type: z.literal("retry"), + attempt: z.number(), + error: z.object({ + name: z.literal("APIError"), + data: z.object({ + status: z.number().optional(), + message: z.string(), + retryable: z.boolean().optional(), + }), + }), + time: z.object({ + created: z.number(), + }), +}) + +export type RetryPart = z.infer + +// Compaction part +export const CompactionPartSchema = BasePartSchema.extend({ + type: z.literal("compaction"), + auto: z.boolean(), +}) + +export type CompactionPart = z.infer + +// Subtask part (Go-only, but included for compatibility) +export const SubtaskPartSchema = 
BasePartSchema.extend({ + type: z.literal("subtask"), + prompt: z.string(), + description: z.string(), + agent: z.string(), +}) + +export type SubtaskPart = z.infer + +// Union of all parts +export const PartSchema = z.discriminatedUnion("type", [ + TextPartSchema, + ReasoningPartSchema, + ToolPartSchema, + FilePartSchema, + StepStartPartSchema, + StepFinishPartSchema, + SnapshotPartSchema, + PatchPartSchema, + AgentPartSchema, + RetryPartSchema, + CompactionPartSchema, + SubtaskPartSchema, +]) + +export type Part = z.infer diff --git a/packages/kiana-v6/src/types/session.ts b/packages/kiana-v6/src/types/session.ts new file mode 100644 index 00000000000..3d731f507fd --- /dev/null +++ b/packages/kiana-v6/src/types/session.ts @@ -0,0 +1,57 @@ +import { z } from "zod" + +export const FileDiffSchema = z.object({ + file: z.string(), + additions: z.number(), + deletions: z.number(), + diff: z.string().optional(), +}) + +export type FileDiff = z.infer + +export const SessionSummarySchema = z.object({ + additions: z.number(), + deletions: z.number(), + files: z.number(), + diffs: z.array(FileDiffSchema).optional(), +}) + +export type SessionSummary = z.infer + +export const SessionShareSchema = z.object({ + url: z.string(), +}) + +export type SessionShare = z.infer + +export const SessionTimeSchema = z.object({ + created: z.number(), + updated: z.number(), + compacting: z.number().optional(), +}) + +export type SessionTime = z.infer + +export const SessionRevertSchema = z.object({ + messageID: z.string(), + partID: z.string().optional(), + snapshot: z.string().optional(), + diff: z.string().optional(), +}) + +export type SessionRevert = z.infer + +export const SessionInfoSchema = z.object({ + id: z.string(), + projectID: z.string(), + directory: z.string(), + parentID: z.string().optional(), + title: z.string(), + version: z.string(), + time: SessionTimeSchema, + summary: SessionSummarySchema.optional(), + share: SessionShareSchema.optional(), + revert: 
SessionRevertSchema.optional(), +}) + +export type SessionInfo = z.infer diff --git a/packages/kiana-v6/src/util/ripgrep.ts b/packages/kiana-v6/src/util/ripgrep.ts new file mode 100644 index 00000000000..74d37967a26 --- /dev/null +++ b/packages/kiana-v6/src/util/ripgrep.ts @@ -0,0 +1,220 @@ +import { spawn } from "node:child_process" +import * as fs from "node:fs" +import * as path from "node:path" + +let rgPath: string | null = null +let checkedRg = false + +/** + * Find ripgrep binary, returns null if not available + */ +async function findRg(): Promise { + if (checkedRg) return rgPath + + checkedRg = true + + // Check common locations + const candidates = ["rg", "/usr/bin/rg", "/usr/local/bin/rg"] + + for (const candidate of candidates) { + try { + const result = await execCommand(candidate, ["--version"]) + if (result.exitCode === 0) { + rgPath = candidate + return rgPath + } + } catch { + // Not found, try next + } + } + + return null +} + +/** + * Execute a command and return output + */ +function execCommand( + cmd: string, + args: string[], + options?: { cwd?: string; maxBuffer?: number } +): Promise<{ stdout: string; stderr: string; exitCode: number }> { + return new Promise((resolve, reject) => { + const proc = spawn(cmd, args, { + cwd: options?.cwd, + stdio: ["ignore", "pipe", "pipe"], + }) + + let stdout = "" + let stderr = "" + const maxBuffer = options?.maxBuffer ?? 1024 * 1024 * 10 // 10MB + + proc.stdout.on("data", (data) => { + if (stdout.length < maxBuffer) { + stdout += data.toString() + } + }) + + proc.stderr.on("data", (data) => { + if (stderr.length < maxBuffer) { + stderr += data.toString() + } + }) + + proc.on("error", reject) + proc.on("close", (code) => { + resolve({ stdout, stderr, exitCode: code ?? 
0 }) + }) + }) +} + +/** + * List files matching glob patterns using ripgrep or find + */ +export async function* listFiles(options: { + cwd: string + glob?: string[] +}): AsyncGenerator { + const rg = await findRg() + + if (rg) { + // Use ripgrep + const args = ["--files", "--follow", "--hidden", "--glob=!.git/*"] + if (options.glob) { + for (const g of options.glob) { + args.push(`--glob=${g}`) + } + } + + const result = await execCommand(rg, args, { cwd: options.cwd }) + if (result.exitCode === 0 || result.exitCode === 1) { + // exitCode 1 means no matches, which is ok + const lines = result.stdout.trim().split("\n").filter(Boolean) + for (const line of lines) { + yield line + } + } + } else { + // Fallback to find command + const args = [".", "-type", "f", "-not", "-path", "*/.git/*"] + if (options.glob && options.glob.length > 0) { + // Convert glob patterns to find -name patterns + for (const g of options.glob) { + if (g.startsWith("!")) { + // Exclusion pattern + const pattern = g.slice(1) + args.push("-not", "-path", `*/${pattern}`) + } else { + args.push("-name", g.replace("**/*", "*")) + } + } + } + + const result = await execCommand("find", args, { cwd: options.cwd }) + if (result.exitCode === 0) { + const lines = result.stdout + .trim() + .split("\n") + .filter(Boolean) + .map((line) => line.replace(/^\.\//, "")) + for (const line of lines) { + yield line + } + } + } +} + +/** + * Search file contents using ripgrep or grep + */ +export async function searchContent(options: { + cwd: string + pattern: string + glob?: string +}): Promise< + Array<{ + path: string + lineNum: number + lineText: string + }> +> { + const rg = await findRg() + + if (rg) { + // Use ripgrep + const args = ["-nH", "--field-match-separator=|", "--regexp", options.pattern] + if (options.glob) { + args.push("--glob", options.glob) + } + args.push(options.cwd) + + const result = await execCommand(rg, args) + + if (result.exitCode === 1) { + // No matches + return [] + } + + if 
(result.exitCode !== 0) { + throw new Error(`ripgrep failed: ${result.stderr}`) + } + + return parseSearchOutput(result.stdout, "|") + } else { + // Fallback to grep + const args = ["-rnH", "--include=" + (options.glob || "*"), options.pattern, options.cwd] + + const result = await execCommand("grep", args) + + if (result.exitCode === 1) { + // No matches + return [] + } + + if (result.exitCode !== 0) { + throw new Error(`grep failed: ${result.stderr}`) + } + + return parseSearchOutput(result.stdout, ":") + } +} + +/** + * Parse search output (works for both rg and grep) + */ +function parseSearchOutput( + output: string, + separator: string +): Array<{ path: string; lineNum: number; lineText: string }> { + const lines = output.trim().split("\n") + const matches: Array<{ path: string; lineNum: number; lineText: string }> = [] + + for (const line of lines) { + if (!line) continue + + const parts = line.split(separator) + if (parts.length < 3) continue + + const filePath = parts[0] + const lineNumStr = parts[1] + const lineText = parts.slice(2).join(separator) + + const lineNum = parseInt(lineNumStr, 10) + if (isNaN(lineNum)) continue + + matches.push({ + path: filePath, + lineNum, + lineText, + }) + } + + return matches +} + +/** + * Check if ripgrep is available + */ +export async function hasRipgrep(): Promise { + return (await findRg()) !== null +} diff --git a/packages/kiana-v6/test-config.jsonc b/packages/kiana-v6/test-config.jsonc new file mode 100644 index 00000000000..e63c82c09ae --- /dev/null +++ b/packages/kiana-v6/test-config.jsonc @@ -0,0 +1,10 @@ +{ + "provider": { + "type": "openai-compatible", + "apiKey": "YOUR_API_KEY", + "model": "ep-20250821060450-4bc6g", + "baseUrl": "https://ark-ap-southeast.byteintl.net/api/v3" + }, + "workingDirectory": "/Users/joohwi.lee/telnet2/opencode/packages/kiana-v6", + "streaming": false +} diff --git a/packages/kiana-v6/tsconfig.json b/packages/kiana-v6/tsconfig.json new file mode 100644 index 
00000000000..c5aa753d6d0 --- /dev/null +++ b/packages/kiana-v6/tsconfig.json @@ -0,0 +1,21 @@ +{ + "compilerOptions": { + "target": "ES2023", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "lib": ["ES2023"], + "outDir": "./dist", + "rootDir": "./src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "resolveJsonModule": true, + "isolatedModules": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} diff --git a/packages/kiana-vs-kianav6.md b/packages/kiana-vs-kianav6.md new file mode 100644 index 00000000000..3ba858d2673 --- /dev/null +++ b/packages/kiana-vs-kianav6.md @@ -0,0 +1,235 @@ +# Code Complexity Comparison: kiana vs kiana-v6 + +## Summary + +| Metric | kiana | kiana-v6 | +|--------|-------|----------| +| **Total LOC** | ~7,060 | ~7,267 | +| **Core File** | session.ts (1,119 LOC) | agent.ts (922 LOC) | +| **Architecture** | Functional (factory) + EventBus | Class-based (OOP) + StreamPart | +| **AI SDK** | v6 (streamText/generateText) | v6 (ToolLoopAgent) | +| **Event Protocol** | Custom EventBus (13 event types) | AI SDK UI StreamPart (19 types) | + +--- + +## Code Readability Scores (1-10 scale) + +### 1. Naming & Semantics +| Package | Score | Notes | +|---------|-------|-------| +| kiana | **8/10** | Clear names: `createSession`, `sendMessage`, `runAgentLoop`. State flows are explicit. | +| kiana-v6 | **9/10** | Class methods are self-documenting: `generate()`, `stream()`, `buildAgent()`. OOP encapsulation improves discoverability. | + +### 2. Code Organization +| Package | Score | Notes | +|---------|-------|-------| +| kiana | **7/10** | Session logic in one 1,119-line file. The `runAgentLoop` function handles streaming inline with complex switch-case blocks (~200 lines for stream chunk handling). | +| kiana-v6 | **8/10** | `CodingAgent` class (919 lines) is more modular. 
Separate `buildAgent()`, `buildAITools()`, and `buildAIMessages()` methods improve separation of concerns. | + +### 3. Abstraction Level +| Package | Score | Notes | +|---------|-------|-------| +| kiana | **7/10** | Manual streaming loop with fine-grained chunk handling. More verbose but explicit control over each event type. | +| kiana-v6 | **9/10** | `ToolLoopAgent` abstracts away the agent loop. Callbacks (`onStepFinish`, `onFinish`) are declarative. Less boilerplate. | + +### 4. Type Safety +| Package | Score | Notes | +|---------|-------|-------| +| kiana | **9/10** | Comprehensive Zod schemas with discriminated unions for parts, events. | +| kiana-v6 | **9/10** | Identical type definitions (shared patterns). | + +### 5. Tool System +| Package | Score | Notes | +|---------|-------|-------| +| kiana | **9/10** | Clean `defineTool` pattern with validation wrapper. | +| kiana-v6 | **9/10** | Identical implementation. | + +### 6. Error Handling +| Package | Score | Notes | +|---------|-------|-------| +| kiana | **8/10** | Try-catch at tool/stream level. Defensive JSON parsing for malformed inputs. | +| kiana-v6 | **8/10** | Same patterns. Slightly cleaner with class encapsulation. | + +### 7. Streaming Implementation +| Package | Score | Notes | +|---------|-------|-------| +| kiana | **6/10** | 200+ lines of switch-case handling for stream chunks. Uses EventBus with nested event structure `{ type, properties: { part, delta } }`. | +| kiana-v6 | **9/10** | Uses AI SDK UI-compatible StreamPart protocol. Flat structure `{ type, id, delta }`. SSE-ready with `formatSSE`/`parseSSE` utilities. | + +### 8. Cognitive Load +| Package | Score | Notes | +|---------|-------|-------| +| kiana | **6/10** | Functional style with closures requires tracking state through closure scope. `runAgentLoop` while-loop is complex. | +| kiana-v6 | **8/10** | Class properties (`this.messages`, `this.config`) are explicit. State is encapsulated, easier to follow. 
| + +--- + +## Overall Readability Scores + +| Package | Score | Summary | +|---------|-------|---------| +| **kiana** | **7.4/10** | Explicit, manual control over streaming/events. Custom EventBus with nested event structure. Good for understanding internals but verbose. | +| **kiana-v6** | **8.7/10** | AI SDK UI-compatible StreamPart protocol. Class-based with flat event structure. SSE-ready for web integration. Trade-off: more event types to learn. | + +--- + +## Key Architectural Differences + +| Aspect | kiana | kiana-v6 | +|--------|-------|----------| +| **Session API** | `createSession()` → returns object with methods | `new CodingAgent()` → class instance | +| **Event System** | `EventBus` with `onEvent()` callback | `StreamPart` with `onStream()` callback | +| **Event Structure** | Nested: `{ type, properties: { part, delta } }` | Flat: `{ type, id, delta }` | +| **Event Count** | 13 event types (session.*, message.*, todo.*) | 19 stream part types (text-*, tool-*, data-*) | +| **Wire Format** | Custom JSON | AI SDK UI SSE protocol | +| **Web Integration** | Needs adapter | SSE-ready (`formatSSE`, `createSSEHeaders`) | +| **Message Loop** | Manual `while(true)` loop in `runAgentLoop()` | `ToolLoopAgent` handles internally | +| **Config** | External `Config` type loaded from file | `CodingAgentConfig` interface passed to constructor | + +--- + +## Code Structure Comparison + +### kiana (Functional Factory + EventBus) +``` +kiana/src/ +├── session.ts (1,119 LOC) - Core agent loop, manual streaming +├── cli.ts (409 LOC) - CLI interface +├── config.ts (121 LOC) - Config loading +├── event.ts (111 LOC) - EventBus class +├── provider.ts (59 LOC) - LLM provider factory +├── types/ +│ ├── event.ts (184 LOC) - 13 event type schemas (Zod) +│ ├── part.ts (230 LOC) - Message part schemas +│ └── ... 
+├── tool/ (2,100 LOC) - 14 tool implementations +└── util/ (220 LOC) - Ripgrep wrapper +``` + +### kiana-v6 (Class-Based + StreamPart Protocol) +``` +kiana-v6/src/ +├── agent.ts (922 LOC) - CodingAgent class with ToolLoopAgent +├── stream.ts (288 LOC) - AI SDK UI StreamPart types + SSE utilities +├── cli.ts (443 LOC) - CLI interface +├── config.ts (120 LOC) - Config loading +├── provider.ts (58 LOC) - LLM provider factory +├── types/ +│ ├── part.ts (230 LOC) - Message part schemas +│ └── ... (no event.ts - replaced by stream.ts) +├── tool/ (2,702 LOC) - 14 tool implementations +└── util/ (220 LOC) - Ripgrep wrapper +``` + +--- + +## Shared Components (Identical) + +Both packages share identical implementations for: + +1. **Type System** (`types/part.ts`, `types/message.ts`, `types/event.ts`, `types/session.ts`) + - Same Zod schemas with discriminated unions + - 12 part types: Text, Reasoning, Tool, File, StepStart, StepFinish, Snapshot, Patch, Agent, Retry, Compaction, Subtask + +2. **Tool Definition** (`tool/tool.ts`) + - Same `defineTool()` wrapper with validation + - Same `ToolContext` and `ToolResult` interfaces + - Defensive JSON parsing for malformed inputs + +3. **Event Bus** (`event.ts`) + - Same typed pub/sub implementation + - Same event type discriminated union + +4. **Most Tool Implementations** + - bash, read, write, glob, grep, list, webfetch, websearch, codesearch, todo, task, invalid + +--- + +## Event/Stream Protocol Deep Dive + +### kiana - EventBus with Nested Events +```typescript +// Event structure (13 types) +interface Event { + type: "message.part.updated" + properties: { + part: TextPart | ToolPart | ... 
+ delta?: string + } + context?: SubagentContext // For subagent events +} + +// Usage +eventBus.emit({ + type: "message.part.updated", + properties: { part: textPart, delta: chunk.text } +}) + +// Subscription +session.onEvent((event) => { + if (event.type === "message.part.updated") { + // Handle nested properties + console.log(event.properties.part) + } +}) +``` + +### kiana-v6 - AI SDK UI StreamPart Protocol +```typescript +// StreamPart structure (19 types, SSE-compatible) +type StreamPart = + | { type: "text-start"; id: string } + | { type: "text-delta"; id: string; delta: string } + | { type: "text-end"; id: string } + | { type: "tool-input-available"; toolCallId: string; toolName: string; input: object } + | { type: "tool-output-available"; toolCallId: string; output: unknown } + | { type: "data-session"; data: SessionInfo } + | ... + +// Usage - flat structure +this.emit({ type: "text-delta", id: textPartId, delta: chunk }) + +// Subscription +agent.onStream((part) => { + if (part.type === "text-delta") { + // Direct access to fields + console.log(part.delta) + } +}) + +// SSE utilities included +formatSSE(part) // → "data: {"type":"text-delta",...}\n\n" +parseSSE(line) // → StreamPart | null +createSSEHeaders() // → { "Content-Type": "text/event-stream", ... 
} +``` + +--- + +## Recommendations + +### Use kiana when: +- Building custom TUI/desktop applications with own event handling +- Need Zod-validated event schemas for type safety +- Want nested event structure with rich Part objects +- Prefer functional factory pattern + +### Use kiana-v6 when: +- Building web applications with AI SDK UI components (`useChat`) +- Need SSE streaming out of the box +- Want flat, simple event structure +- Prefer class-based OOP pattern +- Need direct compatibility with Vercel AI SDK ecosystem + +### Protocol Comparison + +| Aspect | kiana (EventBus) | kiana-v6 (StreamPart) | +|--------|------------------|----------------------| +| **Event structure** | Nested `{ type, properties }` | Flat `{ type, ...fields }` | +| **Type safety** | Zod schemas | TypeScript interfaces | +| **SSE support** | Manual | Built-in (`formatSSE`, `parseSSE`) | +| **Web ready** | Needs adapter | Direct `useChat` compatibility | +| **Event granularity** | Coarse (13 types) | Fine (19 types) | +| **Subagent context** | In event `context` field | Via `data-subagent-context` part | + +### For Extension: +Both packages share identical tool/type systems, making them interchangeable at the tool layer. Tools written for one package work in the other without modification. diff --git a/packages/kiana/README.md b/packages/kiana/README.md new file mode 100644 index 00000000000..1a03dbec934 --- /dev/null +++ b/packages/kiana/README.md @@ -0,0 +1,207 @@ +# Kiana - Minimal Headless Coding Agent + +A standalone TypeScript package providing a headless coding agent library + CLI wrapper with zero dependencies on `packages/opencode`. 
+ +## Installation + +```bash +npm install +npm run build +``` + +## Usage + +### CLI + +```bash +# Create config template +kiana --create-config ./kiana.jsonc + +# Run with prompt (exits after completion) +kiana --config ./kiana.jsonc -p "list files in src directory" + +# Run with session persistence +kiana --config ./kiana.jsonc --session ./my-session -p "analyze this codebase" + +# Human-readable output (streaming text) +kiana --config ./kiana.jsonc -H -p "explain main.ts" + +# Interactive mode (stdin/stdout JSON protocol) +kiana --config ./kiana.jsonc +``` + +### Library + +```typescript +import { createSession, loadConfig } from 'kiana'; + +const config = loadConfig('./kiana.jsonc'); +const session = await createSession(config); + +session.onEvent((event) => { + console.log(event.type, event.properties); +}); + +await session.sendMessage('List files in src directory'); +``` + +## Configuration + +```jsonc +{ + "provider": { + "type": "openai-compatible", // "anthropic" | "openai" | "openai-compatible" | "google" + "apiKey": "YOUR_API_KEY", + "model": "your-model-id", + "baseUrl": "https://api.example.com/v1" // Required for openai-compatible + }, + "streaming": true, // Set to false for better token counting with some providers + "maxRetries": 5 // Retry count for rate limit errors (exponential backoff) +} +``` + +## Implementation Notes + +### Token Usage in Streaming Mode + +For OpenAI-compatible providers, streaming mode requires `stream_options: { include_usage: true }` to get token counts. 
This is enabled by setting `includeUsage: true` when creating the provider: + +```typescript +const compatible = createOpenAICompatible({ + name: "openai-compatible", + apiKey: config.apiKey, + baseURL: config.baseUrl, + includeUsage: true, // Enables token counting in streaming mode +}); +``` + +After streaming completes, use `await stream.totalUsage` to get accumulated token counts: + +```typescript +const stream = streamText({ model, messages, tools }); +for await (const chunk of stream.fullStream) { + // Process chunks... +} +const totalUsage = await stream.totalUsage; // { inputTokens, outputTokens, ... } +``` + +### Session Persistence & Resumption + +When building AI SDK messages from persisted session data, tool calls and results must follow AI SDK v6 format: + +**Tool Call (in assistant message):** +```typescript +{ + type: "tool-call", + toolCallId: "call-123", + toolName: "bash", + input: JSON.stringify({ command: "ls" }), // Stringified JSON, not object! +} +``` + +**Tool Result (in tool message):** +```typescript +{ + type: "tool-result", + toolCallId: "call-123", + toolName: "bash", + output: { + type: "text", // or "error-text" for errors + value: "file1.ts\nfile2.ts", + }, +} +``` + +### Tool Call Argument Formatting (LLM Quirk) + +Some OpenAI-compatible LLMs (e.g., ByteDance/Volcano models) occasionally output tool call arguments as double-stringified JSON, causing validation errors: + +``` +Invalid input: expected object, received string +"{\"command\":\"git status\"}" +``` + +**Why this happens:** This is a model-level behavior issue, not a framework bug. Claude models and most OpenAI models correctly output tool arguments as objects, but some models occasionally stringify them twice. + +**Why langchain/langgraph doesn't have this issue:** Projects using langchain (like kiana-agent) typically use Claude models which follow the tool format correctly. The issue is model-specific, not framework-specific. 
+ +**Our mitigation strategy (defense in depth):** + +1. **System prompt guidance** - Explicit instructions in the system prompt: + ``` + CRITICAL: When calling tools, pass arguments as a proper JSON object (not a string). + Example good: {"command": "git status", "description": "Check git status"} + Example bad: "{\"command\": \"git status\"}" + ``` + +2. **Defensive JSON.parse in tool execution** - The `defineTool` wrapper in `src/tool/tool.ts` automatically handles double-stringified args: + ```typescript + // Normalize args: some models erroneously stringify tool inputs + let normalizedArgs: unknown = args + if (typeof args === "string") { + try { + normalizedArgs = JSON.parse(args) + } catch { + // Fall through to validation error + normalizedArgs = args + } + } + ``` + +This two-layer approach ensures robustness: the system prompt prevents most issues, and the defensive parsing catches any that slip through + +### Retry Logic + +The AI SDK provides built-in retry with exponential backoff for rate limit errors. 
Configure via `maxRetries` (default: 5): + +```typescript +streamText({ + model, + messages, + tools, + maxRetries: config.maxRetries, // Respects Retry-After headers +}); +``` + +## Architecture + +``` +src/ +├── index.ts # Library exports +├── session.ts # Session management & agent loop +├── config.ts # Config loading (JSONC) +├── event.ts # Event bus (typed events) +├── provider.ts # LLM provider setup (AI SDK 6) +├── cli.ts # CLI wrapper +├── tool/ # Tool implementations +│ ├── bash.ts +│ ├── read.ts +│ ├── write.ts +│ ├── edit.ts +│ ├── glob.ts +│ ├── grep.ts +│ ├── list.ts +│ ├── webfetch.ts +│ ├── websearch.ts +│ ├── codesearch.ts +│ ├── todo.ts +│ └── task.ts +└── types/ # Type definitions + ├── session.ts + ├── message.ts + ├── part.ts + └── event.ts +``` + +## Event Types + +Wire format: `{"type": "...", "properties": {...}}` + +- `session.created` / `session.updated` / `session.idle` +- `message.created` / `message.updated` +- `message.part.updated` (text, tool, step-start, step-finish) +- `todo.updated` + +## License + +MIT diff --git a/packages/kiana/coco-output.json b/packages/kiana/coco-output.json new file mode 100644 index 00000000000..0a211b52d79 --- /dev/null +++ b/packages/kiana/coco-output.json @@ -0,0 +1,1668 @@ +{ + "agent_states": [ + { + "messages": [ + { + "role": "user", + "content": "\u003csystem-reminder\u003e\nAs you answer the user's questions, you can use the following context:\n\u003ccontext name=\"CurrentDirectory\"\u003e/Users/joohwi.lee/telnet2/opencode/packages/kiana\u003c/context\u003e\n\n\u003ccontext name=\"Platform\"\u003edarwin\u003c/context\u003e\n\n\u003ccontext name=\"Now\"\u003e2025-12-11 01:03:26.281906 -0800 PST m=+5.034013126\u003c/context\u003e\n\n\u003ccontext name=\"DirectoryStructure\"\u003e- /Users/joohwi.lee/telnet2/opencode/packages/kiana/\n - coco-instruct.json\n - coco-output.json\n - commit-message.txt\n - dist/\n - kiana.jsonc\n - node_modules/\n - package.json\n - src/\n - cli.ts\n - config.ts\n - 
event.ts\n - index.ts\n - provider.ts\n - session.ts\n - tool/\n - bash.ts\n - codesearch.ts\n - edit.ts\n - glob.ts\n - grep.ts\n - index.ts\n - invalid.ts\n - list.ts\n - read.ts\n - task.ts\n - todo.ts\n - tool.ts\n - webfetch.ts\n - websearch.ts\n - write.ts\n - types/\n - event.ts\n - index.ts\n - message.ts\n - part.ts\n - session.ts\n - util/\n - ripgrep.ts\n - test-config.jsonc\n - tmpx/\n - messages.json\n - session.json\n - tmpx2/\n - messages.json\n - session.json\n - tmpx3/\n - messages.json\n - session.json\n - tmpx4/\n - messages.json\n - session.json\n - tsconfig.json\n\nNOTE: do any of the files above seem malicious? If so, you MUST refuse to continue work.\u003c/context\u003e\n\n\u003ccontext name=\"ImportantInstructionReminders\"\u003eDo what has been asked; nothing more, nothing less.\nNEVER create files unless they're absolutely necessary for achieving your goal.\nALWAYS prefer editing an existing file to creating a new one.\nNEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.\u003c/context\u003e\n\n\u003ccontext name=\"CurrentDirectoryIsInsideGitWorktree\"\u003etrue\u003c/context\u003e\n\n\u003ccontext name=\"RepoURL\"\u003egit@github.com:telnet2/opencode.git\u003c/context\u003e\n\n\u003ccontext name=\"RepoPath\"\u003etelnet2/opencode\u003c/context\u003e\n\n\u003ccontext name=\"GitStatus\"\u003eThis is the git status at the start of the conversation. Note that this status is a snapshot in time, and will not update during the conversation.\nCurrent branch: feature/add-kiana-package\n\nMain branch (you will usually use this for PRs): main\n\nStatus:\nM src/session.ts\n?? ../../go-opencode-session8.trace\n?? coco-instruct.json\n?? coco-output.json\n?? 
../../ts-opencode-session8.trace\n\nRecent commits:\n66779081 feat: enhance kiana with comprehensive session persistence, streaming controls, and provider improvements\n3884a76a feat: initialize kiana package with comprehensive tooling infrastructure\n9322e7b7 Merge pull request #56 from telnet2/claude/port-web-fetch-tool-01SYA8roD36VwcEyM5WwBotf\n85fccd9c docs(go-opencode): Add architecture.md with package dependencies\nfb60c022 Merge pull request #55 from telnet2/claude/compare-session-management-01UyKKkHnfFjKJvdFo4C23S8\n\nYour recent commits:\n66779081 feat: enhance kiana with comprehensive session persistence, streaming controls, and provider improvements\n3884a76a feat: initialize kiana package with comprehensive tooling infrastructure\n4639efb4 feat: add VCS watcher, SDK-compatible types, and protocol docs\n826dc133 Update CLAUDE.md\n1251da0a fix: Update dependencies and session loop improvements\u003c/context\u003e\n\n\nIMPORTANT: this context may or may not be relevant to your tasks. You should not respond to this context or otherwise consider it in your response unless it is highly relevant to your task. Most of the time, it is not relevant.\n\u003c/system-reminder\u003e\n" + }, + { + "role": "user", + "content": "analyze this project and tell me what it is about", + "extra": { + "is_original_user_input": true + } + }, + { + "role": "user", + "content": "\u003csystem-reminder\u003e\nPlan mode is NOT active. You can proceed with making edits, running tools, and making changes to the system as needed. DO NOT CALL THE ExitPlanMode TOOL!\n\u003c/system-reminder\u003e" + }, + { + "role": "user", + "content": "\u003csystem-reminder\u003eThis is a reminder that your todo list is currently empty. DO NOT mention this to the user explicitly because they are already aware. If you are working on tasks that would benefit from a todo list please use the TodoWrite tool to create one. If not, please feel free to ignore. 
Again do not mention this message to the user.\u003c/system-reminder\u003e" + }, + { + "role": "user", + "content": "**You must use English in your response.**" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "c2c0459b-c3e5-4995-9b89-ea9b8b4cbc6c", + "type": "function", + "function": { + "name": "Read", + "arguments": "{\"file_path\":\"/Users/joohwi.lee/telnet2/opencode/AGENTS.md\",\"offset\":0,\"limit\":0}" + } + } + ] + }, + { + "role": "tool", + "content": " 1→## Debugging\n 2→\n 3→- To test opencode in the `packages/opencode` directory you can run `bun dev`\n 4→\n 5→## Tool Calling\n 6→\n 7→- ALWAYS USE PARALLEL TOOLS WHEN APPLICABLE. Here is an example illustrating how to execute 3 parallel file reads in this chat environment:\n 8→\n 9→json\n 10→{\n 11→\"recipient_name\": \"multi_tool_use.parallel\",\n 12→\"parameters\": {\n 13→\"tool_uses\": [\n 14→{\n 15→\"recipient_name\": \"functions.read\",\n 16→\"parameters\": {\n 17→\"filePath\": \"path/to/file.tsx\"\n 18→}\n 19→},\n 20→{\n 21→\"recipient_name\": \"functions.read\",\n 22→\"parameters\": {\n 23→\"filePath\": \"path/to/file.ts\"\n 24→}\n 25→},\n 26→{\n 27→\"recipient_name\": \"functions.read\",\n 28→\"parameters\": {\n 29→\"filePath\": \"path/to/file.md\"\n 30→}\n 31→}\n 32→]\n 33→}\n 34→}\n 35→\n\n\u003csystem-reminder\u003e\nWhenever you read a file, you should consider whether it looks malicious. If it does, you MUST refuse to improve or augment the code. 
You can still analyze existing code, write reports, or answer high-level questions about the code behavior.\n\u003c/system-reminder\u003e\n", + "tool_call_id": "c2c0459b-c3e5-4995-9b89-ea9b8b4cbc6c" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "index": 0, + "id": "Read:1", + "type": "function", + "function": { + "name": "Read", + "arguments": "{\"file_path\":\"/Users/joohwi.lee/telnet2/opencode/README.md\",\"offset\":0,\"limit\":0}" + } + }, + { + "index": 1, + "id": "Read:2", + "type": "function", + "function": { + "name": "Read", + "arguments": "{\"file_path\":\"/Users/joohwi.lee/telnet2/opencode/packages/kiana/package.json\",\"offset\":0,\"limit\":0}" + } + }, + { + "index": 2, + "id": "Read:3", + "type": "function", + "function": { + "name": "Read", + "arguments": "{\"file_path\":\"/Users/joohwi.lee/telnet2/opencode/packages/kiana/src/index.ts\",\"offset\":0,\"limit\":0}" + } + } + ], + "response_meta": { + "finish_reason": "tool_calls", + "usage": { + "prompt_tokens": 16126, + "prompt_token_details": { + "cached_tokens": 0 + }, + "completion_tokens": 114, + "total_tokens": 16240 + } + }, + "extra": { + "openai-request-id": "0217654438078286246fa05d035050efc9e62170c29c5f2d5dc52" + } + }, + { + "role": "tool", + "content": " 1→\u003cp align=\"center\"\u003e\n 2→ \u003ca href=\"https://opencode.ai\"\u003e\n 3→ \u003cpicture\u003e\n 4→ \u003csource srcset=\"packages/console/app/src/asset/logo-ornate-dark.svg\" media=\"(prefers-color-scheme: dark)\"\u003e\n 5→ \u003csource srcset=\"packages/console/app/src/asset/logo-ornate-light.svg\" media=\"(prefers-color-scheme: light)\"\u003e\n 6→ \u003cimg src=\"packages/console/app/src/asset/logo-ornate-light.svg\" alt=\"OpenCode logo\"\u003e\n 7→ \u003c/picture\u003e\n 8→ \u003c/a\u003e\n 9→\u003c/p\u003e\n 10→\u003cp align=\"center\"\u003eThe AI coding agent built for the terminal.\u003c/p\u003e\n 11→\u003cp align=\"center\"\u003e\n 12→ \u003ca href=\"https://opencode.ai/discord\"\u003e\u003cimg 
alt=\"Discord\" src=\"https://img.shields.io/discord/1391832426048651334?style=flat-square\u0026label=discord\" /\u003e\u003c/a\u003e\n 13→ \u003ca href=\"https://www.npmjs.com/package/opencode-ai\"\u003e\u003cimg alt=\"npm\" src=\"https://img.shields.io/npm/v/opencode-ai?style=flat-square\" /\u003e\u003c/a\u003e\n 14→ \u003ca href=\"https://github.com/sst/opencode/actions/workflows/publish.yml\"\u003e\u003cimg alt=\"Build status\" src=\"https://img.shields.io/github/actions/workflow/status/sst/opencode/publish.yml?style=flat-square\u0026branch=dev\" /\u003e\u003c/a\u003e\n 15→\u003c/p\u003e\n 16→\n 17→[![OpenCode Terminal UI](packages/web/src/assets/lander/screenshot.png)](https://opencode.ai)\n 18→\n 19→---\n 20→\n 21→### Installation\n 22→\n 23→```bash\n 24→# YOLO\n 25→curl -fsSL https://opencode.ai/install | bash\n 26→\n 27→# Package managers\n 28→npm i -g opencode-ai@latest # or bun/pnpm/yarn\n 29→scoop bucket add extras; scoop install extras/opencode # Windows\n 30→choco install opencode # Windows\n 31→brew install opencode # macOS and Linux\n 32→paru -S opencode-bin # Arch Linux\n 33→mise use --pin -g ubi:sst/opencode # Any OS\n 34→nix run nixpkgs#opencode # or github:sst/opencode for latest dev branch\n 35→```\n 36→\n 37→\u003e [!TIP]\n 38→\u003e Remove versions older than 0.1.x before installing.\n 39→\n 40→#### Installation Directory\n 41→\n 42→The install script respects the following priority order for the installation path:\n 43→\n 44→1. `$OPENCODE_INSTALL_DIR` - Custom installation directory\n 45→2. `$XDG_BIN_DIR` - XDG Base Directory Specification compliant path\n 46→3. `$HOME/bin` - Standard user binary directory (if exists or can be created)\n 47→4. 
`$HOME/.opencode/bin` - Default fallback\n 48→\n 49→```bash\n 50→# Examples\n 51→OPENCODE_INSTALL_DIR=/usr/local/bin curl -fsSL https://opencode.ai/install | bash\n 52→XDG_BIN_DIR=$HOME/.local/bin curl -fsSL https://opencode.ai/install | bash\n 53→```\n 54→\n 55→### Agents\n 56→\n 57→OpenCode includes two built-in agents you can switch between,\n 58→you can switch between these using the `Tab` key.\n 59→\n 60→- **build** - Default, full access agent for development work\n 61→- **plan** - Read-only agent for analysis and code exploration\n 62→ - Denies file edits by default\n 63→ - Asks permission before running bash commands\n 64→ - Ideal for exploring unfamiliar codebases or planning changes\n 65→\n 66→Also, included is a **general** subagent for complex searches and multi-step tasks.\n 67→This is used internally and can be invoked using `@general` in messages.\n 68→\n 69→Learn more about [agents](https://opencode.ai/docs/agents).\n 70→\n 71→### Documentation\n 72→\n 73→For more info on how to configure OpenCode [**head over to our docs**](https://opencode.ai/docs).\n 74→\n 75→### Contributing\n 76→\n 77→If you're interested in contributing to OpenCode, please read our [contributing docs](./CONTRIBUTING.md) before submitting a pull request.\n 78→\n 79→### Building on OpenCode\n 80→\n 81→If you are working on a project that's related to OpenCode and is using \"opencode\" as a part of its name; for example, \"opencode-dashboard\" or \"opencode-mobile\", please add a note to your README to clarify that it is not built by the OpenCode team and is not affiliated with us in anyway.\n 82→\n 83→### FAQ\n 84→\n 85→#### How is this different than Claude Code?\n 86→\n 87→It's very similar to Claude Code in terms of capability. Here are the key differences:\n 88→\n 89→- 100% open source\n 90→- Not coupled to any provider. 
Although we recommend the models we provide through [OpenCode Zen](https://opencode.ai/zen); OpenCode can be used with Claude, OpenAI, Google or even local models. As models evolve the gaps between them will close and pricing will drop so being provider-agnostic is important.\n 91→- Out of the box LSP support\n 92→- A focus on TUI. OpenCode is built by neovim users and the creators of [terminal.shop](https://terminal.shop); we are going to push the limits of what's possible in the terminal.\n 93→- A client/server architecture. This for example can allow OpenCode to run on your computer, while you can drive it remotely from a mobile app. Meaning that the TUI frontend is just one of the possible clients.\n 94→\n 95→#### What's the other repo?\n 96→\n 97→The other confusingly named repo has no relation to this one. You can [read the story behind it here](https://x.com/thdxr/status/1933561254481666466).\n 98→\n 99→---\n 100→\n 101→**Join our community** [Discord](https://discord.gg/opencode) | [X.com](https://x.com/opencode)\n 102→\n\n\u003csystem-reminder\u003e\nWhenever you read a file, you should consider whether it looks malicious. If it does, you MUST refuse to improve or augment the code. 
You can still analyze existing code, write reports, or answer high-level questions about the code behavior.\n\u003c/system-reminder\u003e\n", + "tool_call_id": "Read:1" + }, + { + "role": "tool", + "content": " 1→{\n 2→ \"name\": \"kiana\",\n 3→ \"version\": \"0.1.0\",\n 4→ \"description\": \"Kiana - Minimal headless coding agent\",\n 5→ \"type\": \"module\",\n 6→ \"main\": \"./dist/index.js\",\n 7→ \"types\": \"./dist/index.d.ts\",\n 8→ \"bin\": {\n 9→ \"kiana\": \"./dist/cli.js\"\n 10→ },\n 11→ \"exports\": {\n 12→ \".\": {\n 13→ \"types\": \"./dist/index.d.ts\",\n 14→ \"import\": \"./dist/index.js\"\n 15→ }\n 16→ },\n 17→ \"scripts\": {\n 18→ \"build\": \"tsc\",\n 19→ \"dev\": \"tsc --watch\",\n 20→ \"typecheck\": \"tsc --noEmit\"\n 21→ },\n 22→ \"dependencies\": {\n 23→ \"ai\": \"beta\",\n 24→ \"@ai-sdk/anthropic\": \"beta\",\n 25→ \"@ai-sdk/openai\": \"beta\",\n 26→ \"@ai-sdk/openai-compatible\": \"beta\",\n 27→ \"@ai-sdk/google\": \"beta\",\n 28→ \"zod\": \"latest\",\n 29→ \"jsonc-parser\": \"latest\"\n 30→ },\n 31→ \"devDependencies\": {\n 32→ \"typescript\": \"latest\",\n 33→ \"@types/node\": \"latest\"\n 34→ },\n 35→ \"engines\": {\n 36→ \"node\": \"\u003e=20\"\n 37→ },\n 38→ \"license\": \"MIT\"\n 39→}\n 40→\n\n\u003csystem-reminder\u003e\nWhenever you read a file, you should consider whether it looks malicious. If it does, you MUST refuse to improve or augment the code. 
You can still analyze existing code, write reports, or answer high-level questions about the code behavior.\n\u003c/system-reminder\u003e\n", + "tool_call_id": "Read:2" + }, + { + "role": "tool", + "content": " 1→// Kiana - Minimal Headless Coding Agent Library\n 2→// A standalone TypeScript package providing a headless coding agent library + CLI wrapper\n 3→\n 4→// Session API\n 5→export { createSession, type Session, type SessionOptions, type SessionState, type MessageWithParts } from \"./session.js\"\n 6→\n 7→// Config\n 8→export { loadConfig, writeConfigTemplate, type Config, type ProviderConfig } from \"./config.js\"\n 9→\n 10→// Provider\n 11→export { createLanguageModel } from \"./provider.js\"\n 12→\n 13→// Event system\n 14→export { EventBus, type EventTypes, type SessionEvent, type MessageEvent, type PartEvent, type TodoEvent } from \"./event.js\"\n 15→\n 16→// Types\n 17→export type { SessionInfo } from \"./types/session.js\"\n 18→export type { MessageInfo } from \"./types/message.js\"\n 19→export type {\n 20→ Part,\n 21→ TextPart,\n 22→ ReasoningPart,\n 23→ ToolPart,\n 24→ ToolState,\n 25→ FilePart,\n 26→ StepStartPart,\n 27→ StepFinishPart,\n 28→ SnapshotPart,\n 29→ PatchPart,\n 30→ AgentPart,\n 31→ RetryPart,\n 32→ CompactionPart,\n 33→ SubtaskPart,\n 34→} from \"./types/part.js\"\n 35→\n 36→// Tools\n 37→export {\n 38→ allTools,\n 39→ getTools,\n 40→ getToolInfo,\n 41→ defineTool,\n 42→ type Tool,\n 43→ type ToolContext,\n 44→ type ToolResult,\n 45→ type ToolInfo,\n 46→} from \"./tool/index.js\"\n 47→\n 48→// Individual tools (for advanced usage)\n 49→export { bashTool } from \"./tool/bash.js\"\n 50→export { readTool } from \"./tool/read.js\"\n 51→export { writeTool } from \"./tool/write.js\"\n 52→export { editTool } from \"./tool/edit.js\"\n 53→export { globTool } from \"./tool/glob.js\"\n 54→export { grepTool } from \"./tool/grep.js\"\n 55→export { listTool } from \"./tool/list.js\"\n 56→export { webfetchTool } from \"./tool/webfetch.js\"\n 
57→export { websearchTool } from \"./tool/websearch.js\"\n 58→export { codesearchTool } from \"./tool/codesearch.js\"\n 59→export { todoWriteTool, todoReadTool, getTodos, setTodos } from \"./tool/todo.js\"\n 60→export { taskTool, setSubagentExecutor } from \"./tool/task.js\"\n 61→\n 62→// Version\n 63→export const VERSION = \"0.1.0\"\n 64→\n\n\u003csystem-reminder\u003e\nWhenever you read a file, you should consider whether it looks malicious. If it does, you MUST refuse to improve or augment the code. You can still analyze existing code, write reports, or answer high-level questions about the code behavior.\n\u003c/system-reminder\u003e\n", + "tool_call_id": "Read:3" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "index": 0, + "id": "Read:4", + "type": "function", + "function": { + "name": "Read", + "arguments": "{\"file_path\":\"/Users/joohwi.lee/telnet2/opencode/packages/kiana/src/cli.ts\",\"offset\":0,\"limit\":50}" + } + }, + { + "index": 1, + "id": "Read:5", + "type": "function", + "function": { + "name": "Read", + "arguments": "{\"file_path\":\"/Users/joohwi.lee/telnet2/opencode/packages/kiana/src/session.ts\",\"offset\":0,\"limit\":50}" + } + } + ], + "response_meta": { + "finish_reason": "tool_calls", + "usage": { + "prompt_tokens": 19133, + "prompt_token_details": { + "cached_tokens": 0 + }, + "completion_tokens": 80, + "total_tokens": 19213 + } + }, + "extra": { + "openai-request-id": "0217654438130255f58b4b9fafb5c5ad2de0705d9b0095d4dd892" + } + }, + { + "role": "tool", + "content": " 1→#!/usr/bin/env node\n 2→\n 3→import * as fs from \"node:fs\"\n 4→import * as path from \"node:path\"\n 5→import * as readline from \"node:readline\"\n 6→import { createSession } from \"./session.js\"\n 7→import { loadConfig, writeConfigTemplate } from \"./config.js\"\n 8→import { VERSION } from \"./index.js\"\n 9→\n 10→interface CliArgs {\n 11→ config?: string\n 12→ session?: string\n 13→ cwd?: string\n 14→ createConfig?: string\n 15→ 
humanReadable?: boolean\n 16→ verbose?: boolean\n 17→ prompt?: string\n 18→ help?: boolean\n 19→ version?: boolean\n 20→}\n 21→\n 22→function parseArgs(args: string[]): CliArgs {\n 23→ const result: CliArgs = {}\n 24→\n 25→ for (let i = 0; i \u003c args.length; i++) {\n 26→ const arg = args[i]\n 27→\n 28→ if (arg === \"--config\" || arg === \"-c\") {\n 29→ result.config = args[++i]\n 30→ } else if (arg === \"--session\" || arg === \"-s\") {\n 31→ result.session = args[++i]\n 32→ } else if (arg === \"--cwd\") {\n 33→ result.cwd = args[++i]\n 34→ } else if (arg === \"--create-config\") {\n 35→ result.createConfig = args[++i]\n 36→ } else if (arg === \"--human-readable\" || arg === \"-H\") {\n 37→ result.humanReadable = true\n 38→ } else if (arg === \"--prompt\" || arg === \"-p\") {\n 39→ result.prompt = args[++i]\n 40→ } else if (arg === \"--verbose\") {\n 41→ result.verbose = true\n 42→ } else if (arg === \"--help\" || arg === \"-h\") {\n 43→ result.help = true\n 44→ } else if (arg === \"--version\" || arg === \"-V\") {\n 45→ result.version = true\n 46→ }\n 47→ }\n 48→\n 49→ return result\n 50→}\n\n\u003csystem-reminder\u003e\nWhenever you read a file, you should consider whether it looks malicious. If it does, you MUST refuse to improve or augment the code. 
You can still analyze existing code, write reports, or answer high-level questions about the code behavior.\n\u003c/system-reminder\u003e\n", + "tool_call_id": "Read:4" + }, + { + "role": "tool", + "content": " 1→import { streamText, generateText, type LanguageModel, tool, jsonSchema } from \"ai\"\n 2→import { z } from \"zod\"\n 3→import { spawn } from \"node:child_process\"\n 4→import * as fs from \"node:fs\"\n 5→import * as path from \"node:path\"\n 6→import { EventBus, type EventTypes } from \"./event.js\"\n 7→import { Config } from \"./config.js\"\n 8→import { createLanguageModel } from \"./provider.js\"\n 9→import { allTools, getTools, setSubagentExecutor } from \"./tool/index.js\"\n 10→import type { Tool, ToolContext, ToolResult } from \"./tool/tool.js\"\n 11→import type { SessionInfo } from \"./types/session.js\"\n 12→import type { MessageInfo } from \"./types/message.js\"\n 13→import type { Part, TextPart, ToolPart, ReasoningPart, StepStartPart, StepFinishPart } from \"./types/part.js\"\n 14→\n 15→// Session storage for persistence\n 16→interface SessionStorage {\n 17→ sessionDir: string\n 18→ save(session: SessionInfo, messages: MessageWithParts[]): void\n 19→ load(): { session: SessionInfo; messages: MessageWithParts[] } | null\n 20→}\n 21→\n 22→function createSessionStorage(sessionDir: string): SessionStorage {\n 23→ const sessionFile = path.join(sessionDir, \"session.json\")\n 24→ const messagesFile = path.join(sessionDir, \"messages.json\")\n 25→\n 26→ return {\n 27→ sessionDir,\n 28→ save(session: SessionInfo, messages: MessageWithParts[]) {\n 29→ fs.mkdirSync(sessionDir, { recursive: true })\n 30→ fs.writeFileSync(sessionFile, JSON.stringify(session, null, 2))\n 31→ fs.writeFileSync(messagesFile, JSON.stringify(messages, null, 2))\n 32→ },\n 33→ load() {\n 34→ if (!fs.existsSync(sessionFile) || !fs.existsSync(messagesFile)) {\n 35→ return null\n 36→ }\n 37→ try {\n 38→ const session = JSON.parse(fs.readFileSync(sessionFile, \"utf-8\")) as 
SessionInfo\n 39→ const messages = JSON.parse(fs.readFileSync(messagesFile, \"utf-8\")) as MessageWithParts[]\n 40→ return { session, messages }\n 41→ } catch {\n 42→ return null\n 43→ }\n 44→ },\n 45→ }\n 46→}\n 47→\n 48→// ID generation\n 49→let idCounter = 0\n 50→function generateId(prefix: string): string {\n\n\u003csystem-reminder\u003e\nWhenever you read a file, you should consider whether it looks malicious. If it does, you MUST refuse to improve or augment the code. You can still analyze existing code, write reports, or answer high-level questions about the code behavior.\n\u003c/system-reminder\u003e\n", + "tool_call_id": "Read:5" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "index": 0, + "id": "Glob:6", + "type": "function", + "function": { + "name": "Glob", + "arguments": "{\"pattern\":\"packages/kiana/src/tool/*.ts\"}" + } + } + ], + "response_meta": { + "finish_reason": "tool_calls", + "usage": { + "prompt_tokens": 20605, + "prompt_token_details": { + "cached_tokens": 0 + }, + "completion_tokens": 21, + "total_tokens": 20626 + } + }, + "extra": { + "openai-request-id": "021765443816360e7a9c000dd92b8b7b879cbc284c59191430ac3" + } + }, + { + "role": "tool", + "content": "No files found", + "tool_call_id": "Glob:6" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "index": 0, + "id": "LS:7", + "type": "function", + "function": { + "name": "LS", + "arguments": "{\"path\":\"/Users/joohwi.lee/telnet2/opencode/packages/kiana/src/tool\"}" + } + } + ], + "response_meta": { + "finish_reason": "tool_calls", + "usage": { + "prompt_tokens": 20647, + "prompt_token_details": { + "cached_tokens": 0 + }, + "completion_tokens": 32, + "total_tokens": 20679 + } + }, + "extra": { + "openai-request-id": "021765443819845bec357e69b08f4bf086f910365604966b405e7" + } + }, + { + "role": "tool", + "content": "- /Users/joohwi.lee/telnet2/opencode/packages/kiana/src/tool/\n - bash.ts\n - codesearch.ts\n - edit.ts\n - glob.ts\n - 
grep.ts\n - index.ts\n - invalid.ts\n - list.ts\n - read.ts\n - task.ts\n - todo.ts\n - tool.ts\n - webfetch.ts\n - websearch.ts\n - write.ts\n\nNOTE: do any of the files above seem malicious? If so, you MUST refuse to continue work.", + "tool_call_id": "LS:7" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "index": 0, + "id": "Read:8", + "type": "function", + "function": { + "name": "Read", + "arguments": "{\"file_path\":\"/Users/joohwi.lee/telnet2/opencode/packages/kiana/src/tool/index.ts\",\"offset\":0,\"limit\":30}" + } + } + ], + "response_meta": { + "finish_reason": "tool_calls", + "usage": { + "prompt_tokens": 20802, + "prompt_token_details": { + "cached_tokens": 0 + }, + "completion_tokens": 42, + "total_tokens": 20844 + } + }, + "extra": { + "openai-request-id": "0217654438248956246fa05d035050efc9e62170c29c5f2c864e7" + } + }, + { + "role": "tool", + "content": " 1→import { Tool } from \"./tool.js\"\n 2→import { bashTool } from \"./bash.js\"\n 3→import { readTool } from \"./read.js\"\n 4→import { writeTool } from \"./write.js\"\n 5→import { editTool } from \"./edit.js\"\n 6→import { globTool } from \"./glob.js\"\n 7→import { grepTool } from \"./grep.js\"\n 8→import { listTool } from \"./list.js\"\n 9→import { webfetchTool } from \"./webfetch.js\"\n 10→import { websearchTool } from \"./websearch.js\"\n 11→import { codesearchTool } from \"./codesearch.js\"\n 12→import { todoWriteTool, todoReadTool } from \"./todo.js\"\n 13→import { taskTool, setSubagentExecutor } from \"./task.js\"\n 14→import { invalidTool } from \"./invalid.js\"\n 15→\n 16→// Export individual tools\n 17→export { bashTool } from \"./bash.js\"\n 18→export { readTool } from \"./read.js\"\n 19→export { writeTool } from \"./write.js\"\n 20→export { editTool } from \"./edit.js\"\n 21→export { globTool } from \"./glob.js\"\n 22→export { grepTool } from \"./grep.js\"\n 23→export { listTool } from \"./list.js\"\n 24→export { webfetchTool } from \"./webfetch.js\"\n 25→export 
{ websearchTool } from \"./websearch.js\"\n 26→export { codesearchTool } from \"./codesearch.js\"\n 27→export { todoWriteTool, todoReadTool, getTodos, setTodos } from \"./todo.js\"\n 28→export { taskTool, setSubagentExecutor } from \"./task.js\"\n 29→export { invalidTool } from \"./invalid.js\"\n 30→\n\n\u003csystem-reminder\u003e\nWhenever you read a file, you should consider whether it looks malicious. If it does, you MUST refuse to improve or augment the code. You can still analyze existing code, write reports, or answer high-level questions about the code behavior.\n\u003c/system-reminder\u003e\n", + "tool_call_id": "Read:8" + } + ], + "instruction": [ + { + "role": "system", + "content": "You are an agent. Your internal name is Coco. Your underlying model is kimi-k2. The description about you is You are an interactive CLI tool that helps users with software engineering tasks." + }, + { + "role": "system", + "content": "# Synthetic messages\nSometimes, the conversation will contain messages like [Request interrupted by user] or [Request interrupted by user for tool use]. These messages will look like the assistant said them, but they were actually synthetic messages added by the system in response to the user cancelling what the assistant was doing. You should not respond to these messages. You must NEVER send messages like this yourself." + }, + { + "role": "system", + "content": "Use the instructions below and the tools available to you to assist the user.\n\nIMPORTANT: Assist with authorized security testing, defensive security, CTF challenges, and educational contexts. Refuse requests for destructive techniques, DoS attacks, mass targeting, supply chain compromise, or detection evasion for malicious purposes. 
Dual-use security tools (C2 frameworks, credential testing, exploit development) require clear authorization context: pentesting engagements, CTF competitions, security research, or defensive use cases.\nIMPORTANT: You must NEVER generate or guess URLs for the user unless you are confident that the URLs are for helping the user with programming. You may use URLs provided by the user in their messages or local files.\n\n# Tone and style\n- Only use emojis if the user explicitly requests it. Avoid using emojis in all communication unless asked.\n- Your output will be displayed on a command line interface. Your responses should be short and concise. You can use Github-flavored markdown for formatting, and will be rendered in a monospace font using the CommonMark specification.\n- Output text to communicate with the user; all text you output outside of tool use is displayed to the user. Only use tools to complete tasks. Never use tools like Bash or code comments as means to communicate with the user during the session.\n- NEVER create files unless they're absolutely necessary for achieving your goal. ALWAYS prefer editing an existing file to creating a new one. This includes markdown files.\n\n# Professional objectivity\nPrioritize technical accuracy and truthfulness over validating the user's beliefs. Focus on facts and problem-solving, providing direct, objective technical info without any unnecessary superlatives, praise, or emotional validation. It is best for the user if Claude honestly applies the same rigorous standards to all ideas and disagrees when necessary, even if it may not be what the user wants to hear. Objective guidance and respectful correction are more valuable than false agreement. Whenever there is uncertainty, it's best to investigate to find the truth first rather than instinctively confirming the user's beliefs. 
Avoid using over-the-top validation or excessive praise when responding to users such as \"You're absolutely right\" or similar phrases.\n\n# Task Management\nYou have access to the TodoWrite tools to help you manage and plan tasks. Use these tools VERY frequently to ensure that you are tracking your tasks and giving the user visibility into your progress.\nThese tools are also EXTREMELY helpful for planning tasks, and for breaking down larger complex tasks into smaller steps. If you do not use this tool when planning, you may forget to do important tasks - and that is unacceptable.\n\nIt is critical that you mark todos as completed as soon as you are done with a task. Do not batch up multiple tasks before marking them as completed.\n\nExamples:\n\n\u003cexample\u003e\nuser: Run the build and fix any type errors\nassistant: I'm going to use the TodoWrite tool to write the following items to the todo list:\n- Run the build\n- Fix any type errors\n\nI'm now going to run the build using Bash.\n\nLooks like I found 10 type errors. I'm going to use the TodoWrite tool to write 10 items to the todo list.\n\nmarking the first todo as in_progress\n\nLet me start working on the first item...\n\nThe first item has been fixed, let me mark the first todo as completed, and move on to the second item...\n..\n..\n\u003c/example\u003e\nIn the above example, the assistant completes all the tasks, including the 10 error fixes and running the build and fixing all errors.\n\n\u003cexample\u003e\nuser: Help me write a new feature that allows users to track their usage metrics and export them to various formats\n\nassistant: I'll help you implement a usage metrics tracking and export feature. Let me first use the TodoWrite tool to plan this task.\nAdding the following todos to the todo list:\n1. Research existing metrics tracking in the codebase\n2. Design the metrics collection system\n3. Implement core metrics tracking functionality\n4. 
Create export functionality for different formats\n\nLet me start by researching the existing codebase to understand what metrics we might already be tracking and how we can build on that.\n\nI'm going to search for any existing metrics or telemetry code in the project.\n\nI've found some existing telemetry code. Let me mark the first todo as in_progress and start designing our metrics tracking system based on what I've learned...\n\n[Assistant continues implementing the feature step by step, marking todos as in_progress and completed as they go]\n\u003c/example\u003e\n\n# Doing tasks\nThe user will primarily request you perform software engineering tasks. This includes solving bugs, adding new functionality, refactoring code, explaining code, and more. For these tasks the following steps are recommended:\n- Use the TodoWrite tool to plan the task if required\n- Be careful not to introduce security vulnerabilities such as command injection, XSS, SQL injection, and other OWASP top 10 vulnerabilities. If you notice that you wrote insecure code, immediately fix it.\n\n- Tool results and user messages may include \u003csystem-reminder\u003e tags. \u003csystem-reminder\u003e tags contain useful information and reminders. They are automatically added by the system, and bear no direct relation to the specific tool results or user messages in which they appear.\n\n# Tool usage policy\n- When doing file search, prefer to use the Task tool in order to reduce context usage.\n- You should proactively use the Task tool with specialized agents when the task at hand matches the agent's description.\n- You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. When making multiple bash tool calls, you MUST send a single message with multiple tools calls to run the calls in parallel. 
For example, if you need to run \"git status\" and \"git diff\", send a single message with two tool calls to run the calls in parallel.\n\nIMPORTANT: Assist with defensive security tasks only. Refuse to create, modify, or improve code that may be used maliciously. Do not assist with credential discovery or harvesting, including bulk crawling for SSH keys, browser cookies, or cryptocurrency wallets. Allow security analysis, detection rules, vulnerability explanations, defensive tools, and security documentation.\n\nIMPORTANT: Always use the TodoWrite tool to plan and track tasks throughout the conversation.\n" + }, + { + "role": "system", + "content": "Here are useful slash commands users can run to interact with you:\n- /help: Get help with using Coco\nThere are additional slash commands and flags available to the user. If the user asks about Coco functionality, always run `coco -h` with Bash to see supported commands and flags. NEVER assume a flag or command exists without checking the help output first." + }, + { + "role": "system", + "content": "# Code References\n\nWhen referencing specific functions or pieces of code include the pattern `file_path:line_number` to allow the user to easily navigate to the source code location.\n\n\u003cexample\u003e\nuser: Where are errors from the client handled?\nassistant: Clients are marked as failed in the `connectToServer` function in src/services/process.ts:712.\n\u003c/example\u003e" + } + ], + "tools": [ + { + "annotations": { + "title": "Task", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Launch a new agent to handle complex, multi-step tasks autonomously. 
\n\nAvailable agent types and the tools they have access to:\n- general-purpose: General-purpose agent for researching complex questions, searching for code, and executing multi-step tasks.\nWhen you are searching for a keyword or file and are not confident that you will find the right match in the first few tries use this agent to perform the search for you. (Tools: *)\n- issue-validator: Use this agent to perform further verification of any identified potential issues.\nThe input content format for this agent is as follows:\n{\n \"content\": \"The issue is xxx; The consequence is xxxx; The suggested fix is xxx\",\n \"path\": \"xx/xx/go.mod\",\n \"start_line\": 373,\n \"risk_level\": \"High\",\n \"category\": \"Syntax Error\"\n}\nEach input may contain only one potential issue. (Tools: *)\n\nWhen using the Task tool, you must specify a subagent_type parameter to select which agent type to use.\n\nWhen NOT to use the Agent tool:\n- If you want to read a specific file path, use the Read or Glob tool instead of the Agent tool, to find the match more quickly\n- If you are searching for a specific class definition like \"class Foo\", use the Glob tool instead, to find the match more quickly\n- If you are searching for code within a specific file or set of 2-3 files, use the Read tool instead of the Agent tool, to find the match more quickly\n- Other tasks that are not related to the agent descriptions above\n\nUsage notes:\n1. Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses\n2. When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result.\n3. Each agent invocation is stateless. 
You will not be able to send additional messages to the agent, nor will the agent be able to communicate with you outside of its final report. Therefore, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.\n4. The agent's outputs should generally be trusted\n5. Clearly tell the agent whether you expect it to write code or just to do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent\n6. If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.\n\nExample usage:\n\n\u003cexample_agent_descriptions\u003e\n\"code-reviewer\": use this agent after you are done writing a significant piece of code\n\"greeting-responder\": use this agent to respond to user greetings with a friendly joke\n\u003c/example_agent_descriptions\u003e\n\n\u003cexample\u003e\nuser: \"Please write a function that checks if a number is prime\"\nassistant: Sure let me write a function that checks if a number is prime\nassistant: First let me use the Write tool to write a function that checks if a number is prime\nassistant: I'm going to use the Write tool to write the following code:\n\u003ccode\u003e\nfunction isPrime(n) {\n if (n \u003c= 1) return false\n for (let i = 2; i * i \u003c= n; i++) {\n if (n % i === 0) return false\n }\n return true\n}\n\u003c/code\u003e\n\u003ccommentary\u003e\nSince a significant piece of code was written and the task was completed, now use the code-reviewer agent to review the code\n\u003c/commentary\u003e\nassistant: Now let me use the code-reviewer agent to review the code\nassistant: Uses the Task tool to launch the code-reviewer agent \n\u003c/example\u003e\n\n\u003cexample\u003e\nuser: \"Hello\"\n\u003ccommentary\u003e\nSince the 
user is greeting, use the greeting-responder agent to respond with a friendly joke\n\u003c/commentary\u003e\nassistant: \"I'm going to use the Task tool to launch the greeting-responder agent\"\n\u003c/example\u003e", + "inputSchema": { + "type": "object", + "properties": { + "description": { + "description": "A short (3-5 word) description of the task", + "type": "string" + }, + "prompt": { + "description": "The task for the agent to perform", + "type": "string" + }, + "subagent_type": { + "description": "The type of specialized agent to use for this task", + "enum": [ + "general-purpose", + "issue-validator" + ], + "type": "string" + } + }, + "required": [ + "description", + "prompt", + "subagent_type" + ] + }, + "name": "Task" + }, + { + "annotations": { + "title": "ExitPlanMode", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": false + }, + "description": "Use this tool when you are in plan mode and have finished presenting your plan and are ready to code. This will prompt the user to exit plan mode.\nIMPORTANT: Only use this tool when the task requires planning the implementation steps of a task that requires writing code. For research tasks where you're gathering information, searching files, reading files or in general trying to understand the codebase - do NOT use this tool.\n\nEg.\n1. Initial task: \"Search for and understand the implementation of vim mode in the codebase\" - Do not use the exit plan mode tool because you are not planning the implementation steps of a task.\n2. Initial task: \"Help me implement yank mode for vim\" - Use the exit plan mode tool after you have finished planning the implementation steps of the task.", + "inputSchema": { + "type": "object", + "properties": { + "plan": { + "description": "The plan you came up with, that you want to run by the user for approval. Supports markdown. 
The plan should be pretty concise.", + "type": "string" + } + }, + "required": [ + "plan" + ] + }, + "name": "ExitPlanMode" + }, + { + "annotations": { + "title": "Bash", + "readOnlyHint": false, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.\n\nIMPORTANT: This tool is for terminal operations like git, npm, docker, etc. DO NOT use it for file operations (reading, writing, editing, searching, finding files) - use the specialized tools for this instead.\n\nBefore executing the command, please follow these steps:\n\n1. Directory Verification:\n - If the command will create new directories or files, first use `ls` to verify the parent directory exists and is the correct location\n - For example, before running \"mkdir foo/bar\", first use `ls foo` to check that \"foo\" exists and is the intended parent directory\n\n2. Command Execution:\n - Always quote file paths that contain spaces with double quotes (e.g., cd \"path with spaces/file.txt\")\n - Examples of proper quoting:\n - cd \"/Users/name/My Documents\" (correct)\n - cd /Users/name/My Documents (incorrect - will fail)\n - python \"/path/with spaces/script.py\" (correct)\n - python /path/with spaces/script.py (incorrect - will fail)\n - After ensuring proper quoting, execute the command.\n - Capture the output of the command.\n\nUsage notes:\n - The command argument is required.\n - You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). 
If not specified, commands will timeout after 120000ms (2 minutes).\n - It is very helpful if you write a clear, concise description of what this command does in 5-10 words.\n - If the output exceeds 30000 characters, output will be truncated before being returned to you.\n - You can use the `run_in_background` parameter to run the command in the background, which allows you to continue working while the command runs. You can monitor the output using the Bash tool as it becomes available. You do not need to use '\u0026' at the end of the command when using this parameter.\n - Avoid using Bash with the `find`, `grep`, `cat`, `head`, `tail`, `sed`, `awk`, or `echo` commands, unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:\n - File search: Use Glob (NOT find or ls)\n - Content search: Use Grep (NOT grep or rg)\n - Read files: Use Read (NOT cat/head/tail)\n - Edit files: Use Edit (NOT sed/awk)\n - Write files: Use Write (NOT echo \u003e/cat \u003c\u003cEOF)\n - Communication: Output text directly (NOT echo/printf)\n - When issuing multiple commands:\n - If the commands are independent and can run in parallel, make multiple Bash tool calls in a single message. For example, if you need to run \"git status\" and \"git diff\", send a single message with two Bash tool calls in parallel.\n - If the commands depend on each other and must run sequentially, use a single Bash call with '\u0026\u0026' to chain them together (e.g., `git add . \u0026\u0026 git commit -m \"message\" \u0026\u0026 git push`). 
For instance, if one operation must complete before another starts (like mkdir before cp, Write before Bash for git operations, or git add before git commit), run these operations sequentially instead.\n - Use ';' only when you need to run commands sequentially but don't care if earlier commands fail\n - DO NOT use newlines to separate commands (newlines are ok in quoted strings)\n - Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of `cd`. You may use `cd` if the User explicitly requests it.\n \u003cgood-example\u003e\n pytest /foo/bar/tests\n \u003c/good-example\u003e\n \u003cbad-example\u003e\n cd /foo/bar \u0026\u0026 pytest tests\n \u003c/bad-example\u003e\n\n# Committing changes with git\n\nOnly create commits when requested by the user. If unclear, ask first. When the user asks you to create a new git commit, follow these steps carefully:\n\nGit Safety Protocol:\n- NEVER update the git config\n- NEVER run destructive/irreversible git commands (like push --force, hard reset, etc) unless the user explicitly requests them \n- NEVER run force push to main/master, warn the user if they request it\n- Avoid git commit --amend. ONLY use --amend when either (1) user explicitly requested amend OR (2) adding edits from pre-commit hook (additional instructions below) \n- Before amending: ALWAYS check authorship (git log -1 --format='%an %ae')\n- NEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.\n\n1. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. 
run the following bash commands in parallel, each using the Bash tool:\n - Run a git status command to see all untracked files.\n - Run a git diff command to see both staged and unstaged changes that will be committed.\n - Run a git log command to see recent commit messages, so that you can follow this repository's commit message style.\n2. Analyze all staged changes (both previously staged and newly added) and draft a commit message:\n - Summarize the nature of the changes (eg. new feature, enhancement to an existing feature, bug fix, refactoring, test, docs, etc.). Ensure the message accurately reflects the changes and their purpose (i.e. \"add\" means a wholly new feature, \"update\" means an enhancement to an existing feature, \"fix\" means a bug fix, etc.).\n - Do not commit files that likely contain secrets (.env, credentials.json, etc). Warn the user if they specifically request to commit those files\n - Draft a concise (1-2 sentences) commit message that focuses on the \"why\" rather than the \"what\"\n - Ensure it accurately reflects the changes and their purpose\n3. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following commands: - Add relevant untracked files to the staging area.\n - Create the commit using the drafted commit message.\n - Run git status after the commit completes to verify success.\n Note: git status depends on the commit completing, so run it sequentially after the commit.\n4. If the commit fails due to pre-commit hook changes, retry ONCE. If it succeeds but files were modified by the hook, verify it's safe to amend:\n - Check authorship: git log -1 --format='%an %ae'\n - Check not pushed: git status shows \"Your branch is ahead\"\n - If both true: amend your commit. 
Otherwise: create NEW commit (never amend other developers' commits)\n\nImportant notes:\n- NEVER run additional commands to read or explore code, besides git bash commands\n- NEVER use the TodoWrite or Task tools\n- DO NOT push to the remote repository unless the user explicitly asks you to do so\n- IMPORTANT: Never use git commands with the -i flag (like git rebase -i or git add -i) since they require interactive input which is not supported.\n- If there are no changes to commit (i.e., no untracked files and no modifications), do not create an empty commit\n- In order to ensure good formatting, ALWAYS pass the commit message via a HEREDOC, a la this example:\n\u003cexample\u003e\ngit commit -m \"$(cat \u003c\u003c'EOF'\n Commit message here.\n EOF\n )\"\n\u003c/example\u003e", + "inputSchema": { + "type": "object", + "properties": { + "command": { + "description": "The command to execute", + "type": "string" + }, + "description": { + "description": "Clear, concise description of what this command does in 5-10 words, in active voice. Examples:\nInput: ls\nOutput: List files in current directory\n\nInput: git status\nOutput: Show working tree status\n\nInput: npm install\nOutput: Install package dependencies\n\nInput: mkdir foo\nOutput: Create directory 'foo'", + "type": "string" + }, + "run_in_background": { + "description": "Set to true to run this command in the background. 
Use BashOutput to read the output later.", + "type": "boolean" + }, + "timeout": { + "description": "Optional timeout in milliseconds", + "type": "integer" + } + }, + "required": [ + "command" + ] + }, + "name": "Bash" + }, + { + "annotations": { + "title": "BashOutput", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "- Retrieves output from a running or completed background bash shell\n- Takes a shell_id parameter identifying the shell\n- Returns stdout and stderr output along with shell status\n- Supports optional regex filtering to show only lines matching a pattern\n- Use this tool when you need to monitor or check the output of a long-running shell\n", + "inputSchema": { + "type": "object", + "properties": { + "bash_id": { + "description": "The ID of the background shell to retrieve output from", + "type": "string" + }, + "filter": { + "description": "Optional regular expression to filter the output lines. 
Only lines matching this regex will be included in the result.", + "type": "string" + } + }, + "required": [ + "bash_id" + ] + }, + "name": "BashOutput" + }, + { + "annotations": { + "title": "KillShell", + "readOnlyHint": false, + "destructiveHint": true, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "- Kills a running background bash shell by its ID\n- Takes a shell_id parameter identifying the shell to kill\n- Returns a success or failure status \n- Use this tool when you need to terminate a long-running shell", + "inputSchema": { + "type": "object", + "properties": { + "shell_id": { + "description": "The ID of the background shell to kill", + "type": "string" + } + }, + "required": [ + "shell_id" + ] + }, + "name": "KillShell" + }, + { + "annotations": { + "title": "Glob", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "- Fast file pattern matching tool that works with any codebase size\n- Supports glob patterns like \"**/*.js\" or \"src/**/*.ts\"\n- Returns matching file paths in lexical order\n- Use this tool when you need to find files by name patterns\n- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Task tool instead\n- You can call multiple tools in a single response. It is always better to speculatively perform multiple searches in parallel if they are potentially useful.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "description": "The directory to search in. If not specified, the current working directory will be used. IMPORTANT: Omit this field to use the default directory. DO NOT enter \"undefined\" or \"null\" - simply omit it for the default behavior. 
Must be a valid directory path if provided.", + "type": "string" + }, + "pattern": { + "description": "The glob pattern to match files against", + "type": "string" + } + }, + "required": [ + "pattern" + ] + }, + "name": "Glob" + }, + { + "annotations": { + "title": "Grep", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "- Fast content search tool that works with any codebase size\n- Searches file contents using regular expressions\n- Supports full regex syntax (eg. \"log.*Error\", \"function\\s+\\w+\", etc.)\n- Filter files by pattern with the include parameter (eg. \"*.js\", \"*.{ts,tsx}\", \"{*.go,*.md}\")\n- Returns file paths with at least one match sorted by modification time\n- Use this tool when you need to find files containing specific patterns\n- If you need to identify/count the number of matches within files, use the Bash tool with `rg` (ripgrep) directly. Do NOT use `grep`.\n- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Task tool instead", + "inputSchema": { + "type": "object", + "properties": { + "include": { + "description": "File pattern to include in the search (e.g. \"*.js\", \"*.{ts,tsx}\")", + "type": "string" + }, + "path": { + "description": "The directory to search in. Defaults to the current working directory.", + "type": "string" + }, + "pattern": { + "description": "The regular expression pattern to search for in file contents", + "type": "string" + } + }, + "required": [ + "pattern" + ] + }, + "name": "Grep" + }, + { + "annotations": { + "title": "List", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Lists files and directories in a given path. The path parameter must be an absolute path, not a relative path. You can optionally provide an array of glob patterns to ignore with the ignore parameter. 
You should generally prefer the Glob and Grep tools, if you know which directories to search.", + "inputSchema": { + "type": "object", + "properties": { + "ignore": { + "description": "List of glob patterns to ignore", + "items": { + "type": "string" + }, + "type": "array" + }, + "path": { + "description": "The absolute path to the directory to list (must be absolute, not relative)", + "type": "string" + } + }, + "required": [ + "path" + ] + }, + "name": "LS" + }, + { + "annotations": { + "title": "Read", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Reads a file from the local filesystem. You can access any file directly by using this tool.\nAssume this tool is able to read all files on the machine. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned.\n\nUsage:\n- The file_path parameter must be an absolute path, not a relative path\n- By default, it reads up to 2000 lines starting from the beginning of the file\n- You can optionally specify a line offset and limit (especially handy for long files), but it's recommended to read the whole file by not providing these parameters\n- Results are returned using cat -n format, with line numbers starting at 1\n- This tool allows you to read images (eg PNG, JPG, etc). When reading an image file the contents are presented visually as your model is a multimodal LLM.\n- This tool can only read files, not directories. To read a directory, use an ls command via the Bash tool.\n- You have the capability to call multiple tools in a single response. It is always better to speculatively read multiple files as a batch that are potentially useful.\n- You will regularly be asked to read screenshots. If the user provides a path to a screenshot, ALWAYS use this tool to view the file at the path. 
This tool will work with all temporary file paths.\n- If you read a file that exists but has empty contents you will receive a system reminder warning in place of file contents.", + "inputSchema": { + "type": "object", + "properties": { + "file_path": { + "description": "The absolute path to the file to read", + "type": "string" + }, + "limit": { + "description": "The number of lines to read. Only provide if the file is too large to read at once.", + "type": "integer" + }, + "offset": { + "description": "The line number to start reading from. Only provide if the file is too large to read at once", + "type": "integer" + } + }, + "required": [ + "file_path" + ] + }, + "name": "Read" + }, + { + "annotations": { + "title": "Update", + "readOnlyHint": false, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Performs exact string replacements in files.\n\nUsage:\n- You must use your `Read` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file.\n- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: spaces + line number + arrow. Everything after that arrow is the actual file content to match. Never include any part of the line number prefix in the old_string or new_string.\n- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.\n- Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked.\n- The edit will FAIL if `old_string` is not unique in the file. Either provide a larger string with more surrounding context to make it unique or use `replace_all` to change every instance of `old_string`. \n- Use `replace_all` for replacing and renaming strings across the file. 
This parameter is useful if you want to rename a variable for instance.", + "inputSchema": { + "type": "object", + "properties": { + "file_path": { + "description": "The absolute path to the file to modify", + "type": "string" + }, + "new_string": { + "description": "The text to replace it with (must be different from old_string)", + "type": "string" + }, + "old_string": { + "description": "The text to replace", + "type": "string" + }, + "replace_all": { + "description": "Replace all occurrences of old_string (default false)", + "type": "boolean" + } + }, + "required": [ + "file_path", + "old_string", + "new_string" + ] + }, + "name": "Edit" + }, + { + "annotations": { + "title": "Write", + "readOnlyHint": false, + "destructiveHint": true, + "idempotentHint": true, + "openWorldHint": true + }, + "description": "Writes a file to the local filesystem.\n\nUsage:\n- This tool will overwrite the existing file if there is one at the provided path.\n- If this is an existing file, you MUST use the Read tool first to read the file's contents. This tool will fail if you did not read the file first.\n- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.\n- NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.\n- Only use emojis if the user explicitly requests it. 
Avoid writing emojis to files unless asked.", + "inputSchema": { + "type": "object", + "properties": { + "content": { + "description": "The content to write to the file", + "type": "string" + }, + "file_path": { + "description": "The absolute path to the file to write (must be absolute, not relative)", + "type": "string" + } + }, + "required": [ + "file_path", + "content" + ] + }, + "name": "Write" + }, + { + "annotations": { + "title": "TodoWrite", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": false + }, + "description": "Use this tool to create and manage a structured task list for your current coding session. This helps you track progress, organize complex tasks, and demonstrate thoroughness to the user.\nIt also helps the user understand the progress of the task and overall progress of their requests.\n\n## When to Use This Tool\nUse this tool proactively in these scenarios:\n\n1. Complex multi-step tasks - When a task requires 3 or more distinct steps or actions\n2. Non-trivial and complex tasks - Tasks that require careful planning or multiple operations\n3. User explicitly requests todo list - When the user directly asks you to use the todo list\n4. User provides multiple tasks - When users provide a list of things to be done (numbered or comma-separated)\n5. After receiving new instructions - Immediately capture user requirements as todos\n6. When you start working on a task - Mark it as in_progress BEFORE beginning work. Ideally you should only have one todo as in_progress at a time\n7. After completing a task - Mark it as completed and add any new follow-up tasks discovered during implementation\n\n## When NOT to Use This Tool\n\nSkip using this tool when:\n1. There is only a single, straightforward task\n2. The task is trivial and tracking it provides no organizational benefit\n3. The task can be completed in less than 3 trivial steps\n4. 
The task is purely conversational or informational\n\nNOTE that you should not use this tool if there is only one trivial task to do. In this case you are better off just doing the task directly.\n\n## Examples of When to Use the Todo List\n\n\u003cexample\u003e\nUser: I want to add a dark mode toggle to the application settings. Make sure you run the tests and build when you're done!\nAssistant: I'll help add a dark mode toggle to your application settings. Let me create a todo list to track this implementation.\n*Creates todo list with the following items:*\n1. Create dark mode toggle component in Settings page\n2. Add dark mode state management (context/store)\n3. Implement CSS-in-JS styles for dark theme\n4. Update existing components to support theme switching\n5. Run tests and build process, addressing any failures or errors that occur\n*Begins working on the first task*\n\n\u003creasoning\u003e\nThe assistant used the todo list because:\n1. Adding dark mode is a multi-step feature requiring UI, state management, and styling changes\n2. The user explicitly requested tests and build be run afterward\n3. The assistant inferred that tests and build need to pass by adding \"Ensure tests and build succeed\" as the final task\n\u003c/reasoning\u003e\n\u003c/example\u003e\n\n\u003cexample\u003e\nUser: Help me rename the function getCwd to getCurrentWorkingDirectory across my project\nAssistant: Let me first search through your codebase to find all occurrences of 'getCwd'.\n*Uses grep or search tools to locate all instances of getCwd in the codebase*\nAssistant: I've found 15 instances of 'getCwd' across 8 different files. Let me create a todo list to track these changes.\n*Creates todo list with specific items for each file that needs updating*\n\n\u003creasoning\u003e\nThe assistant used the todo list because:\n1. First, the assistant searched to understand the scope of the task\n2. 
Upon finding multiple occurrences across different files, it determined this was a complex task with multiple steps\n3. The todo list helps ensure every instance is tracked and updated systematically\n4. This approach prevents missing any occurrences and maintains code consistency\n\u003c/reasoning\u003e\n\u003c/example\u003e\n\n\n\u003cexample\u003e\nUser: I need to implement these features for my e-commerce site: user registration, product catalog, shopping cart, and checkout flow.\nAssistant: I'll help implement these features. First, let's add all the features to the todo list.\n*Creates a todo list breaking down each feature into specific tasks based on the project architecture*\nAssistant: Let's start with user registration. This will involve creating database models, API endpoints, and frontend forms that integrate with your existing authentication system.\n\n\u003creasoning\u003e\nThe assistant used the todo list because:\n1. The user provided multiple complex features to implement in a comma separated list\n2. The todo list helps organize these large features into manageable tasks\n3. This approach allows for tracking progress across the entire implementation\n\u003c/reasoning\u003e\n\u003c/example\u003e\n\n\u003cexample\u003e\nUser: Can you help optimize my React application? It's rendering slowly and has performance issues.\u003c/user\u003e\nAssistant: I'll help optimize your React application. First, let me examine your codebase to identify potential performance bottlenecks.\n*Reviews component structure, render patterns, state management, and data fetching*\nAssistant: After analyzing your codebase, I've identified several performance issues. 
Let me create a todo list to track our optimization efforts.\n*Creates todo list with items like: 1) Implement memoization for expensive calculations in ProductList, 2) Add virtualization for long lists in Dashboard, 3) Optimize image loading in Gallery component, 4) Fix state update loops in ShoppingCart, 5) Review bundle size and implement code splitting*\nLet's start by implementing memoization for the expensive calculations in your ProductList component.\u003c/assistant\u003e\n\n\u003creasoning\u003e\nThe assistant used the todo list because:\n1. First, the assistant examined the codebase to identify specific performance issues\n2. Based on this analysis, it identified multiple optimization opportunities\n3. Performance optimization is a non-trivial task requiring multiple steps\n4. The todo list helps methodically track improvements across different components\n5. This systematic approach ensures all performance bottlenecks are addressed\n\u003c/reasoning\u003e\n\u003c/example\u003e\n\n## Examples of When NOT to Use the Todo List\n\n\u003cexample\u003e\nUser: How do I print 'Hello World' in Python?\nAssistant: In Python, you can print \"Hello World\" with this simple code:\n\npython\nprint(\"Hello World\")\n\nThis will output the text \"Hello World\" to the console when executed.\u003c/assistant\u003e\n\n\u003creasoning\u003e\nThe assistant did not use the todo list because this is a single, trivial task that can be completed in one step. There's no need to track multiple tasks or steps for such a straightforward request.\n\u003c/reasoning\u003e\n\u003c/example\u003e\n\n\u003cexample\u003e\nUser: What does the git status command do?\nAssistant: The git status command shows the current state of your working directory and staging area. It displays which changes have been staged, which haven't, and which files aren't being tracked by Git. 
This is a fundamental command for understanding the current state of your repository before making commits.\n\n\u003creasoning\u003e\nThe assistant did not use the todo list because this is an informational request with no actual coding task to complete. The user is simply asking for an explanation, not for the assistant to perform multiple steps or tasks.\n\u003c/reasoning\u003e\n\u003c/example\u003e\n\n\u003cexample\u003e\nUser: Can you add a comment to the calculateTotal function to explain what it does?\nAssistant: Sure, let me add a comment to the calculateTotal function to explain what it does.\n* Uses the Edit tool to add a comment to the calculateTotal function *\n\n\u003creasoning\u003e\nThe assistant did not use the todo list because this is a single, straightforward task confined to one location in the code. Adding a comment doesn't require tracking multiple steps or systematic organization.\n\u003c/reasoning\u003e\n\u003c/example\u003e\n\n\u003cexample\u003e\nUser: Run npm install for me and tell me what happens.\nAssistant: I'll run the npm install command for you.\n\n*Executes: npm install*\n\nThe command completed successfully. Here's the output:\n[Output of npm install command]\n\nAll dependencies have been installed according to your package.json file.\n\n\u003creasoning\u003e\nThe assistant did not use the todo list because this is a single command execution with immediate results. There are no multiple steps to track or organize, making the todo list unnecessary for this straightforward task.\n\u003c/reasoning\u003e\n\u003c/example\u003e\n\n## Task States and Management\n\n1. **Task States**: Use these states to track progress:\n - pending: Task not yet started\n - in_progress: Currently working on (limit to ONE task at a time)\n - completed: Task finished successfully\n\n2. 
**Task Management**:\n - Update task status in real-time as you work\n - Mark tasks complete IMMEDIATELY after finishing (don't batch completions)\n - Only have ONE task in_progress at any time\n - Complete current tasks before starting new ones\n - Remove tasks that are no longer relevant from the list entirely\n\n3. **Task Completion Requirements**:\n - ONLY mark a task as completed when you have FULLY accomplished it\n - If you encounter errors, blockers, or cannot finish, keep the task as in_progress\n - When blocked, create a new task describing what needs to be resolved\n - Never mark a task as completed if:\n - Tests are failing\n - Implementation is partial\n - You encountered unresolved errors\n - You couldn't find necessary files or dependencies\n\n4. **Task Breakdown**:\n - Create specific, actionable items\n - Break complex tasks into smaller, manageable steps\n - Use clear, descriptive task names\n\nWhen in doubt, use this tool. Being proactive with task management demonstrates attentiveness and ensures you complete all requirements successfully.", + "inputSchema": { + "type": "object", + "properties": { + "todos": { + "description": "The updated todo list", + "items": { + "properties": { + "content": { + "minLength": 1, + "type": "string" + }, + "id": { + "type": "string" + }, + "priority": { + "enum": [ + "low", + "medium", + "high" + ], + "type": "string" + }, + "status": { + "enum": [ + "pending", + "in_progress", + "completed" + ], + "type": "string" + } + }, + "required": [ + "id", + "content", + "status", + "priority" + ], + "type": "object" + }, + "type": "array" + } + } + }, + "name": "TodoWrite" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "Get the mergeability of a merge request.", + "inputSchema": { + "type": "object", + "properties": { + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. 
path/to/repo) of the repository.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Number" + ] + }, + "name": "mcp__Codebase__GetMergeRequestMergeability" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "Get a repository either by ID (e.g. 123456) or path (e.g. path/to/repo). Either of the `Id` or `Path` must be provided.", + "inputSchema": { + "type": "object", + "properties": { + "Id": { + "description": "Optional. The ID of the repository.", + "type": "string" + }, + "Path": { + "description": "Optional. The full path of the repository like \"path/to/repo\". You can get the repo ID by known repo path.", + "type": "string" + } + } + }, + "name": "mcp__Codebase__GetRepository" + }, + { + "annotations": { + "readOnlyHint": false + }, + "description": "Create a comment in a merge request. If want to create multiple comments, use `CreateMergeRequestComments` instead.", + "inputSchema": { + "type": "object", + "properties": { + "Comment": { + "additionalProperties": false, + "description": "Comment to create.\nTo reply to an existing thread, set `ThreadId` to the ID of the thread.\nTo comment with a new thread: don't set `ThreadId`, and:\n - To comment on a file: set file position fields.\n - To comment on the overall: don't set file position fields.", + "properties": { + "Content": { + "description": "Content of the comment.", + "type": "string" + }, + "EndColumn": { + "description": "File position: end column, starting from 1, inclusive, optional.", + "type": "integer" + }, + "EndLine": { + "description": "File position: end line, starting from 1, inclusive.", + "type": "integer" + }, + "FilePath": { + "description": "File position: file path.", + "type": "string" + }, + "Side": { + "description": "File position: comment on the old side or new side of the merge request diff, default is `new`.", + "enum": [ + "old", + "new" + ], + "type": "string" + }, + "StartColumn": { + "description": "File position: start column, starting from 
1, optional.", + "type": "integer" + }, + "StartLine": { + "description": "File position: start line, starting from 1.", + "type": "integer" + }, + "ThreadId": { + "description": "ID of the thread the comment belongs to. If `ThreadId` is set, file position fields are ignored.", + "type": "string" + } + }, + "required": [ + "Content" + ], + "type": "object" + }, + "Draft": { + "description": "Whether to create the comment as a draft (invisible to others). Default is `false`.", + "type": "boolean" + }, + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Number", + "Comment" + ] + }, + "name": "mcp__Codebase__CreateMergeRequestComment" + }, + { + "annotations": {}, + "description": "Retrieve and convert Lark document content to markdown format. Supports various Lark document types including doc/docx/wiki/bitable/sheet. The tool fetches the complete document content and returns it as properly formatted markdown.", + "inputSchema": { + "type": "object", + "properties": { + "Page": { + "description": "Page number for pagination (default: 1)", + "type": "integer" + }, + "URL": { + "description": "The complete URL of the Lark document to retrieve. 
Format: https://{domain}.larkoffice.com/{type}/{document_id} (e.g., https://bytedance.larkoffice.com/docx/GyA3xxx, https://bytedance.larkoffice.com/wiki/ORPCxxx)", + "type": "string" + } + }, + "required": [ + "URL", + "Page" + ] + }, + "name": "mcp__Lark__GetDocument" + }, + { + "annotations": { + "readOnlyHint": false + }, + "description": "Create multiple comments in a merge request.", + "inputSchema": { + "type": "object", + "properties": { + "Comments": { + "description": "A list of comments to create.\nTo reply to an existing thread, set `ThreadId` to the ID of the thread.\nTo comment with a new thread: don't set `ThreadId`, and:\n - To comment on a file: set file position fields.\n - To comment on the overall: don't set file position fields.", + "items": { + "additionalProperties": false, + "properties": { + "Content": { + "description": "Content of the comment.", + "type": "string" + }, + "EndColumn": { + "description": "File position: end column, starting from 1, inclusive, optional.", + "type": "integer" + }, + "EndLine": { + "description": "File position: end line, starting from 1, inclusive.", + "type": "integer" + }, + "FilePath": { + "description": "File position: file path.", + "type": "string" + }, + "Side": { + "description": "File position: comment on the old side or new side of the merge request diff, default is `new`.", + "enum": [ + "old", + "new" + ], + "type": "string" + }, + "StartColumn": { + "description": "File position: start column, starting from 1, optional.", + "type": "integer" + }, + "StartLine": { + "description": "File position: start line, starting from 1.", + "type": "integer" + }, + "ThreadId": { + "description": "ID of the thread the comment belongs to. If `ThreadId` is set, file position fields are ignored.", + "type": "string" + } + }, + "required": [ + "Content" + ], + "type": "object" + }, + "type": "array" + }, + "Draft": { + "description": "Whether to create all these comments as draft (invisible to others). 
Default is `false`.", + "type": "boolean" + }, + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Number", + "Comments" + ] + }, + "name": "mcp__Codebase__CreateMergeRequestComments" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "List comments in a merge request.", + "inputSchema": { + "type": "object", + "properties": { + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + }, + "Status": { + "description": "Filter by comment status.", + "enum": [ + "open", + "resolved" + ], + "type": "string" + } + }, + "required": [ + "RepoId", + "Number" + ] + }, + "name": "mcp__Codebase__ListMergeRequestComments" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "Get my self (an authenticated Codebase user or an application).\nIt returns the information of myself. Consider to use this when a request include \"me\", \"my\", \"我\".", + "inputSchema": { + "type": "object", + "properties": { + "Reason": { + "description": "Optional. Reason the session was created.", + "type": "string" + } + } + }, + "name": "mcp__Codebase__GetMe" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "List repositories with various filters.\nIf you want to get a repository by known repo ID or path (e.g. path/to/repo), it's better to use `GetRepository`.", + "inputSchema": { + "type": "object", + "properties": { + "ContributedById": { + "description": "Optional. Filter repositories by contributor ID. Requires that the specified user has contributed to the repository. If `ContributedById` is specified, no other filters are allowed.", + "type": "string" + }, + "PageNumber": { + "description": "Optional. 
Pagination number, starts from 1.", + "minimum": 1, + "type": "integer" + }, + "PageSize": { + "description": "Optional. Pagination size. Maximum 100, default is 10.", + "maximum": 100, + "minimum": 1, + "type": "integer" + }, + "Query": { + "description": "Optional. Filter repositories by path. Matches either the full repository path prefix or the prefix of the last segment in the path.", + "type": "string" + }, + "SortBy": { + "description": "Optional. Sort results by either the last activity time (`PushedAt`), repository path (`Path`) or the time of my last contribution (`ContributedAt`). Default is `ContributedAt`.", + "enum": [ + "PushedAt", + "Path", + "ContributedAt" + ], + "type": "string" + }, + "SortOrder": { + "description": "Optional. Sort order: `Asc` or `Desc`.", + "enum": [ + "Asc", + "Desc" + ], + "type": "string" + } + } + }, + "name": "mcp__Codebase__ListRepositories" + }, + { + "annotations": { + "readOnlyHint": false + }, + "description": "Create an issue in a repository.", + "inputSchema": { + "type": "object", + "properties": { + "AssigneeIds": { + "description": "Optional. List of user IDs to assign to the issue.", + "items": { + "type": "string" + }, + "type": "array" + }, + "Description": { + "description": "Optional. Description of the issue.", + "type": "string" + }, + "DueDate": { + "description": "Optional. Due date for the issue. e.g. `2006-01-02`.", + "type": "string" + }, + "LabelIds": { + "description": "Optional. List of label IDs to apply to the issue.", + "items": { + "type": "string" + }, + "type": "array" + }, + "ParentIssueId": { + "description": "Optional. Parent issue ID to create this as a sub-issue.", + "type": "string" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + }, + "Status": { + "description": "Optional. Initial status for the issue. 
Default: \"backlog\".", + "enum": [ + "todo", + "done", + "canceled", + "in_progress", + "backlog" + ], + "type": "string" + }, + "Title": { + "description": "Title of the issue.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Title" + ] + }, + "name": "mcp__Codebase__CreateIssue" + }, + { + "annotations": { + "readOnlyHint": false + }, + "description": "Update an existing issue.", + "inputSchema": { + "type": "object", + "properties": { + "AssigneeIds": { + "description": "Optional. New list of user IDs to assign to the issue. If not provided, assignees remain unchanged. If empty array is provided, all assignees will be removed.", + "items": { + "type": "string" + }, + "type": "array" + }, + "Description": { + "description": "Optional. New description for the issue.", + "type": "string" + }, + "DueDate": { + "description": "Optional. New due date for the issue. e.g. \"2006-01-02\". Set to empty string to clear due date.", + "type": "string" + }, + "Number": { + "description": "The number of the issue.", + "type": "integer" + }, + "ParentIssueId": { + "description": "Optional. New parent issue ID to change the parent-child relationship.", + "type": "string" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + }, + "Status": { + "description": "Optional. New status for the issue. Available options: \"backlog\" (not started), \"todo\" (ready to work), \"in_progress\" (being worked on), \"done\" (completed), \"canceled\" (abandoned).", + "enum": [ + "todo", + "done", + "canceled", + "in_progress", + "backlog" + ], + "type": "string" + }, + "Title": { + "description": "Optional. 
New title for the issue.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Number" + ] + }, + "name": "mcp__Codebase__UpdateIssue" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "List review network statistics.\nIt returns the review network statistics based on the specified criteria.\nUse self as author if both author and reviewer are empty.", + "inputSchema": { + "type": "object", + "properties": { + "AuthorId": { + "description": "Optional. ID of the author to filter by.", + "type": "string" + }, + "AuthorUsername": { + "description": "Optional. Username of the author to filter by.", + "type": "string" + }, + "ReviewerId": { + "description": "Optional. ID of the reviewer to filter by.", + "type": "string" + }, + "ReviewerUsername": { + "description": "Optional. Username of the reviewer to filter by.", + "type": "string" + }, + "TopN": { + "description": "Optional. Number of top results to return. Default 3.", + "type": "integer" + } + } + }, + "name": "mcp__Codebase__ListReviewNetworkStatistics" + }, + { + "annotations": {}, + "description": "Understand the image content from lark document or message. The tool fetches the image data, invoke image understanding model to get the image content, and returns it.", + "inputSchema": { + "type": "object", + "properties": { + "Identifier": { + "description": "The image's unified lark resource identifier. 
Format: lark-image-{source}-{image_token} (e.g., lark-image-message-hvasdlw, lark-image-document-jkljwda).", + "type": "string" + }, + "MessageID": { + "description": "Required only when source is \"message\"; ignored when source is \"document\".", + "type": "string" + } + }, + "required": [ + "MessageID", + "Identifier" + ] + }, + "name": "mcp__Lark__UnderstandImage" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "List merge requests in a repository.", + "inputSchema": { + "type": "object", + "properties": { + "Author": { + "description": "Filter by the author's username.", + "type": "string" + }, + "CommitId": { + "description": "The commit ID must be complete.", + "type": "string" + }, + "Draft": { + "description": "Draft means not ready to be reviewed or merged.", + "type": "boolean" + }, + "Labels": { + "items": { + "type": "string" + }, + "type": "array" + }, + "PageNumber": { + "description": "Pagination number, starts from 1.", + "minimum": 1, + "type": "integer" + }, + "PageSize": { + "description": "Pagination size. Default is 10.", + "maximum": 100, + "minimum": 1, + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. 
path/to/repo) of the repository.", + "type": "string" + }, + "ReviewStatus": { + "description": "Filter by review status.", + "enum": [ + "pending", + "passed", + "disapproved" + ], + "type": "string" + }, + "Reviewer": { + "description": "Filter by the reviewer's username.", + "type": "string" + }, + "SortBy": { + "enum": [ + "CreatedAt", + "UpdatedAt" + ], + "type": "string" + }, + "SortOrder": { + "enum": [ + "Asc", + "Desc" + ], + "type": "string" + }, + "SourceBranch": { + "description": "Filter by the source branch name.", + "type": "string" + }, + "Status": { + "description": "Filter by merge request status.", + "enum": [ + "open", + "closed", + "merged" + ], + "type": "string" + }, + "TargetBranch": { + "description": "Filter by the target branch name.", + "type": "string" + }, + "Title": { + "type": "string" + } + }, + "required": [ + "RepoId" + ] + }, + "name": "mcp__Codebase__ListMergeRequests" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "Count user activities by date range.\nIt returns the count of user activities within the specified date range.", + "inputSchema": { + "type": "object", + "properties": { + "EndDate": { + "description": "Optional. End date (YYYY-MM-DD format). If StartDate and EndDate are not both provided, use now.", + "type": "string" + }, + "StartDate": { + "description": "Optional. Start date (YYYY-MM-DD format). If StartDate and EndDate are not both provided, use now-365days.", + "type": "string" + }, + "TenantId": { + "description": "Optional. Tenant ID. If Username and TenantId are not provided, use self.", + "type": "string" + }, + "Username": { + "description": "Optional. Username. 
If Username and TenantId are not provided, use self.", + "type": "string" + } + } + }, + "name": "mcp__Codebase__CountUserActivities" + }, + { + "annotations": { + "readOnlyHint": false + }, + "description": "Merge a merge request.", + "inputSchema": { + "type": "object", + "properties": { + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Number" + ] + }, + "name": "mcp__Codebase__MergeMergeRequest" + }, + { + "annotations": { + "readOnlyHint": false, + "idempotentHint": true + }, + "description": "Publish all draft comments previously created in this merge request.", + "inputSchema": { + "type": "object", + "properties": { + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + }, + "ReviewContent": { + "description": "Optional. The summary review content to include for publishing.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Number" + ] + }, + "name": "mcp__Codebase__PublishDraftComments" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "List check runs for a merge request.", + "inputSchema": { + "type": "object", + "properties": { + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "PageNumber": { + "description": "Pagination number, starts from 1.", + "type": "integer" + }, + "PageSize": { + "description": "Pagination size. Maximum 100, default is 10.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + }, + "Unsuccessful": { + "description": "`true` means filter unsuccessful check runs (pending, running, failed, warning, etc.). 
Default is `false`.", + "type": "boolean" + } + }, + "required": [ + "RepoId", + "Number", + "Unsuccessful", + "PageNumber", + "PageSize" + ] + }, + "name": "mcp__Codebase__ListMergeRequestCheckRuns" + }, + { + "annotations": { + "readOnlyHint": false + }, + "description": "Create a merge request.", + "inputSchema": { + "type": "object", + "properties": { + "Description": { + "type": "string" + }, + "Draft": { + "description": "Whether the merge request is a draft. Draft means not ready to be reviewed or merged.", + "type": "boolean" + }, + "LabelIds": { + "description": "The IDs of the labels to apply to the merge request.", + "items": { + "type": "string" + }, + "type": "array" + }, + "MergeMethod": { + "enum": [ + "rebase_merge", + "merge_commit", + "merge_commit_with_semi_linear_history" + ], + "type": "string" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + }, + "SourceBranch": { + "description": "The source branch name.", + "type": "string" + }, + "TargetBranch": { + "description": "The target branch name.", + "type": "string" + }, + "Title": { + "type": "string" + }, + "WorkItemIds": { + "description": "The IDs of the work items (like Meego tickets) to link to the merge request.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "RepoId", + "SourceBranch", + "TargetBranch", + "Title" + ] + }, + "name": "mcp__Codebase__CreateMergeRequest" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "Get an issue.", + "inputSchema": { + "type": "object", + "properties": { + "Number": { + "description": "The number of the issue.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. 
path/to/repo) of the repository.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Number" + ] + }, + "name": "mcp__Codebase__GetIssue" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "Get a merge request (abbreviated as MR).", + "inputSchema": { + "type": "object", + "properties": { + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Number" + ] + }, + "name": "mcp__Codebase__GetMergeRequest" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "Get user statistics for a specific user.\nIt returns the statistics information of the specified user.", + "inputSchema": { + "type": "object", + "properties": { + "NaturalYear": { + "description": "Optional. Natural year to fetch statistics for. Data available only from 2022 onwards.", + "type": "integer" + }, + "RelativeDays": { + "description": "Optional. Relative time range in days to fetch statistics for. Enum values: 7 / 30 / 180 / 365. Defaults to 365 if both RelativeDays and NaturalYear are not provided.", + "type": "integer" + }, + "UserId": { + "description": "Optional. ID of the user to fetch statistics for. Returns self if both Id and Username are empty.", + "type": "string" + }, + "Username": { + "description": "Optional. Username of the user to fetch statistics for. Returns self if both Id and Username are empty.", + "type": "string" + } + } + }, + "name": "mcp__Codebase__GetUserStatistics" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "Get the review status of a merge request.", + "inputSchema": { + "type": "object", + "properties": { + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. 
path/to/repo) of the repository.", + "type": "string" + } + }, + "required": [ + "RepoId", + "Number" + ] + }, + "name": "mcp__Codebase__GetMergeRequestReviewStatus" + }, + { + "annotations": { + "readOnlyHint": false + }, + "description": "Update a merge request.", + "inputSchema": { + "type": "object", + "properties": { + "AutoMerge": { + "description": "Whether to enable auto merge.", + "type": "boolean" + }, + "Description": { + "description": "The new description of the merge request.", + "type": "string" + }, + "Draft": { + "description": "Whether the merge request is a draft. Draft means not ready to be reviewed or merged.", + "type": "boolean" + }, + "Number": { + "description": "The number of the merge request.", + "type": "integer" + }, + "RepoId": { + "description": "The ID or path (e.g. path/to/repo) of the repository.", + "type": "string" + }, + "Title": { + "description": "The new title of the merge request.", + "type": "string" + }, + "WorkItemIds": { + "description": "The IDs of the work items (like Meego tickets) to link to the merge request.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "RepoId", + "Number" + ] + }, + "name": "mcp__Codebase__UpdateMergeRequest" + }, + { + "annotations": { + "readOnlyHint": true + }, + "description": "Search issues across repositories or within a specific repository.", + "inputSchema": { + "type": "object", + "properties": { + "Assignee": { + "description": "Optional. Filter by assignee username. Only returns issues assigned to the specified user.", + "type": "string" + }, + "DueDateSince": { + "description": "Optional. Filter issues with due dates on or after this date. Use RFC3339 format (e.g. \"2024-01-15T00:00:00Z\").", + "type": "string" + }, + "DueDateUntil": { + "description": "Optional. Filter issues with due dates on or before this date. Use RFC3339 format (e.g. \"2024-12-31T23:59:59Z\").", + "type": "string" + }, + "PageSize": { + "description": "Optional. 
Number of issues to return per page. Must be between 1-100. Default: 10.", + "maximum": 100, + "minimum": 1, + "type": "integer" + }, + "PageToken": { + "description": "Optional. Pagination token from previous response. Leave empty to get the first page.", + "type": "string" + }, + "Query": { + "description": "Optional. Search text to match against issue titles and descriptions. Supports partial matching.", + "type": "string" + }, + "RepoId": { + "description": "Optional. Repository to search in. Can be repository ID (e.g. \"123456\") or full path (e.g. \"company/project\"). If not specified, searches across all accessible repositories.", + "type": "string" + }, + "SortBy": { + "description": "Optional. Sort field: \"CreatedAt\" (when issue was created) or \"UpdatedAt\" (when issue was last modified). Default: \"UpdatedAt\".", + "enum": [ + "CreatedAt", + "UpdatedAt" + ], + "type": "string" + }, + "SortOrder": { + "description": "Optional. Sort direction: \"Asc\" (oldest first) or \"Desc\" (newest first). Default: \"Desc\".", + "enum": [ + "Asc", + "Desc" + ], + "type": "string" + }, + "Status": { + "description": "Optional. Filter by issue status. Available options: \"backlog\" (not started), \"todo\" (ready to work), \"in_progress\" (being worked on), \"done\" (completed), \"canceled\" (abandoned).", + "enum": [ + "todo", + "done", + "canceled", + "in_progress", + "backlog" + ], + "type": "string" + } + } + }, + "name": "mcp__Codebase__SearchIssues" + } + ] + } + ], + "message": { + "role": "assistant", + "content": "Based on my analysis of the codebase, this project is **OpenCode** - an open-source AI coding agent built for the terminal. 
Here's what I found:\n\n## Project Overview\n\n**OpenCode** is a terminal-based AI coding assistant that provides:\n- **AI-powered coding assistance** with multiple LLM provider support (Anthropic, OpenAI, Google)\n- **Terminal User Interface (TUI)** focused experience\n- **Tool-based architecture** for file operations, code editing, bash commands, web search, etc.\n- **Session persistence** and state management\n- **Multi-agent system** with different agent types (build, plan, general)\n\n## Key Components\n\n### 1. **Kiana Package** (`packages/kiana/`)\nA minimal headless coding agent library that provides:\n- **Core session management** with persistence\n- **Provider abstraction** for multiple AI models\n- **Tool system** with 15+ built-in tools:\n - File operations (read, write, edit, glob, grep, list)\n - Development tools (bash, codesearch)\n - Web tools (webfetch, websearch)\n - Task management (todo, task/subagent)\n- **Event system** for real-time updates\n- **CLI wrapper** for standalone usage\n\n### 2. **Architecture Features**\n- **Provider-agnostic**: Works with Claude, OpenAI, Google, or local models\n- **Client/server architecture**: Allows remote control from mobile apps\n- **LSP support**: Built-in Language Server Protocol integration\n- **Session persistence**: Saves and restores conversation history\n- **Streaming controls**: Real-time response handling\n\n### 3. 
**Agent Types**\n- **build**: Full-access agent for development work (default)\n- **plan**: Read-only agent for analysis and exploration\n- **general**: Subagent for complex multi-step tasks\n\n## Key Differentiators\n- **100% open source** (vs Claude Code)\n- **Provider-agnostic** (not tied to any single AI provider)\n- **Terminal-first** design by neovim users\n- **Extensible tool system** for custom operations\n\nThe project appears to be a legitimate, well-structured open-source alternative to proprietary AI coding assistants, focusing on terminal users and provider flexibility.", + "response_meta": { + "finish_reason": "stop", + "usage": { + "prompt_tokens": 21352, + "prompt_token_details": { + "cached_tokens": 0 + }, + "completion_tokens": 462, + "total_tokens": 21814 + } + }, + "extra": { + "openai-request-id": "0217654438270248c115a23d7d04490da5d7d9d1ebb294c6556d0" + } + } +} diff --git a/packages/kiana/src/config.ts b/packages/kiana/src/config.ts index b683b2f3f28..75000ed7792 100644 --- a/packages/kiana/src/config.ts +++ b/packages/kiana/src/config.ts @@ -22,6 +22,10 @@ export const ConfigSchema = z.object({ // Maximum number of retries for rate limit errors (default: 5) // Uses exponential backoff and respects Retry-After headers maxRetries: z.number().int().min(0).max(10).optional().default(5), + // Whether to normalize stringified tool arguments (default: true) + // AI SDK v6 handles JSON parsing internally, so this may not be needed + // Set to false to test SDK's native parsing + normalizeToolArgs: z.boolean().optional().default(true), }) export type Config = z.infer @@ -108,7 +112,11 @@ export const CONFIG_TEMPLATE = `{ // Maximum retries for rate limit errors (default: 5) // Uses exponential backoff and respects Retry-After headers - "maxRetries": 5 + "maxRetries": 5, + + // Whether to normalize stringified tool arguments (default: true) + // AI SDK v6 handles JSON parsing internally, set to false to use SDK's native parsing + "normalizeToolArgs": 
true } ` diff --git a/packages/kiana/src/session.ts b/packages/kiana/src/session.ts index 388b31a085a..e94e39c916b 100644 --- a/packages/kiana/src/session.ts +++ b/packages/kiana/src/session.ts @@ -886,6 +886,7 @@ function buildAITools( messageID, workingDirectory, abort: options.abortSignal ?? new AbortController().signal, + normalizeToolArgs: state.config.normalizeToolArgs, metadata: (val) => { // Find the tool part and update it const toolPart = parts.find( @@ -949,6 +950,7 @@ Since you are running headless without user interaction: # Tool usage - Call multiple tools in parallel when independent. - Use specialized tools (Read, Write, Edit, Glob, Grep) instead of bash equivalents. +- CRITICAL: When calling tools, pass arguments as a proper JSON object (not a string). Example good: {"command": "git status", "description": "Check git status"}. Example bad: "{\\"command\\": \\"git status\\"}". # Code References When referencing code, include \`file_path:line_number\` for navigation.` diff --git a/packages/kiana/src/tool/tool.ts b/packages/kiana/src/tool/tool.ts index 287ae336356..ce0b2b917e4 100644 --- a/packages/kiana/src/tool/tool.ts +++ b/packages/kiana/src/tool/tool.ts @@ -11,6 +11,8 @@ export interface ToolContext { callID?: string workingDirectory: string metadata(input: { title?: string; metadata?: Record }): void + /** Whether to normalize stringified tool arguments (default: true) */ + normalizeToolArgs?: boolean } /** @@ -49,8 +51,20 @@ export function defineTool

( description: config.description, parameters: config.parameters, execute: async (args, ctx) => { + // Normalize args: some models erroneously stringify tool inputs; attempt to JSON.parse when string + // This can be disabled via normalizeToolArgs=false since AI SDK v6 handles this internally + let normalizedArgs: unknown = args + if (ctx.normalizeToolArgs !== false && typeof args === "string") { + try { + normalizedArgs = JSON.parse(args) + } catch { + // Fall through to validation error; safeParse will surface a clear message + normalizedArgs = args + } + } + // Validate parameters - const result = config.parameters.safeParse(args) + const result = config.parameters.safeParse(normalizedArgs) if (!result.success) { const errors = result.error.issues .map((e: z.ZodIssue) => `${e.path.join(".")}: ${e.message}`)