diff --git a/.eslintignore b/.eslintignore index b7b7316..d9382c4 100644 --- a/.eslintignore +++ b/.eslintignore @@ -1,3 +1,4 @@ dist node_modules examples +locales diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 128472d..a8825aa 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -11,7 +11,7 @@ - **Installed Size**: ~106MB (including all dependencies in node_modules) - **Languages**: TypeScript (primary), JavaScript (compiled output) - **Target Runtime**: Node.js (CommonJS modules) -- **Framework**: Model-agnostic, no AI framework dependencies +- **Framework**: Model-agnostic core; optional adapters (LangChain) - **Package Manager**: pnpm (preferred) or npm (fallback) - **Testing**: vitest - **Linting**: ESLint with TypeScript support @@ -52,7 +52,7 @@ npm run test # Runs all vitest tests # Duration: ~5-10 seconds - # Should show: "Test Files 2 passed (2), Tests 7 passed (7)" + # Should show: "Test Files 3 passed (3), Tests 16 passed (16)" ``` 4. **Format Code** @@ -97,20 +97,24 @@ The repository uses GitHub Actions CI that runs: ``` / -├── src/ # TypeScript source code -│ ├── index.ts # Main exports (TrimCompressor, SummarizeCompressor, interfaces) -│ ├── interfaces.ts # Core type definitions (SlimContextMessage, etc.) 
-│ └── strategies/ # Compression strategy implementations -│ ├── trim.ts # TrimCompressor: keeps first + last N messages -│ └── summarize.ts # SummarizeCompressor: AI-powered summarization -├── tests/ # vitest test files -│ ├── trim.test.ts # Tests for TrimCompressor -│ └── summarize.test.ts # Tests for SummarizeCompressor -├── examples/ # Documentation-only examples (not code) -│ ├── OPENAI_EXAMPLE.md # Copy-paste OpenAI integration -│ └── LANGCHAIN_EXAMPLE.md # Copy-paste LangChain integration -├── dist/ # Compiled JavaScript output (generated) -└── package.json # npm package configuration +├── src/ # TypeScript source code +│ ├── index.ts # Main exports (trim, summarize, interfaces, adapters namespace) +│ ├── interfaces.ts # Core type definitions (SlimContextMessage, etc.) +│ ├── adapters/ # Integration adapters (optional) +│ │ └── langchain.ts # LangChain adapter + helpers (compressLangChainHistory, toSlimModel) +│ └── strategies/ # Compression strategy implementations +│ ├── trim.ts # TrimCompressor: keeps first + last N messages +│ └── summarize.ts # SummarizeCompressor: AI-powered summarization +├── tests/ # vitest test files +│ ├── trim.test.ts # Tests for TrimCompressor +│ ├── summarize.test.ts # Tests for SummarizeCompressor +│ └── langchain.test.ts # Tests for LangChain adapter + helper +├── examples/ # Documentation-only examples (not code) +│ ├── OPENAI_EXAMPLE.md # Copy-paste OpenAI integration +│ ├── LANGCHAIN_EXAMPLE.md # Copy-paste LangChain integration +│ └── LANGCHAIN_COMPRESS_HISTORY.md # One-call compressLangChainHistory usage +├── dist/ # Compiled JavaScript output (generated) +└── package.json # npm package configuration ``` ### Configuration Files @@ -134,12 +138,13 @@ The repository uses GitHub Actions CI that runs: - **TrimCompressor**: Simple strategy keeping first (system) message + last N-1 messages - **SummarizeCompressor**: AI-powered strategy that summarizes middle conversations when exceeding maxMessages -**Framework Independence**: 
No dependencies on OpenAI, LangChain, or other AI frameworks. Users implement the minimal `SlimContextChatModel` interface to connect their preferred model. +**Framework Independence**: Core library has no framework dependencies. An optional LangChain adapter is provided for convenience; core remains BYOM. ### Dependencies and Build Artifacts -- **Production**: Zero dependencies (framework-agnostic design) +- **Production**: Zero runtime dependencies (framework-agnostic design) - **Development**: TypeScript, ESLint, Prettier, vitest, various type definitions +- **Optional peer**: `@langchain/core` (only if using the LangChain adapter). The adapter is exported under `slimcontext/adapters/langchain` and as a `langchain` namespace from the root export. - **Ignored Files**: dist/, node_modules/, examples/ (linting), \*.tgz - **Distributed Files**: Only dist/ directory (compiled JS + .d.ts files) @@ -149,8 +154,8 @@ The repository uses GitHub Actions CI that runs: ```bash npm run test -# Expects: 7 tests across 2 files, all passing -# Tests cover both TrimCompressor and SummarizeCompressor functionality +# Expects: ~16 tests across 3 files, all passing +# Tests cover TrimCompressor, SummarizeCompressor, and the LangChain adapter/helper ``` ### Manual Verification Steps @@ -181,6 +186,17 @@ npm run test - **src/interfaces.ts**: Core type definitions - modify for interface changes - **src/strategies/trim.ts**: Simple compression logic - **src/strategies/summarize.ts**: AI-powered compression with alignment logic +- **src/adapters/langchain.ts**: LangChain adapter and helpers (`compressLangChainHistory`, `toSlimModel`, conversions) + +### Adapters + +- LangChain adapter import options: + - Recommended (works across module systems): `import * as langchain from 'slimcontext/adapters/langchain'` + - Root namespace (available as a property on the root export; usage differs by module system): + - CommonJS: `const { langchain } = require('slimcontext')` + - ESM/TypeScript: 
`import * as slim from 'slimcontext'; const { langchain } = slim;` + - Note: `import { langchain } from 'slimcontext'` may not work in all environments due to CJS/ESM interop. Prefer one of the patterns above. + - Includes a one-call history helper: `compressLangChainHistory(history, options)` --- diff --git a/CHANGELOG.md b/CHANGELOG.md index 873fcbe..e1cbfec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,29 @@ All notable changes to this project will be documented in this file. The format is based on Keep a Changelog, and this project adheres to Semantic Versioning. -## [Unreleased] - 2025-08-24 +## [2.1.0] - 2025-08-27 + +### Added + +- LangChain adapter under `src/adapters/langchain.ts` with helpers: + - `extractContent`, `roleFromMessageType`, `baseToSlim`, `slimToLangChain` + - `toSlimModel(llm)` wrapper to use LangChain `BaseChatModel` with `SummarizeCompressor`. + - `compressLangChainHistory(history, options)` high-level helper for one-call compression on `BaseMessage[]`. +- Tests for adapter behavior in `tests/langchain.test.ts`. +- Examples: + - `examples/LANGCHAIN_EXAMPLE.md`: adapting a LangChain model to `SlimContextChatModel`. + - `examples/LANGCHAIN_COMPRESS_HISTORY.md`: using `compressLangChainHistory` directly. + +### Changed + +- README updated with a LangChain adapter section and one-call usage sample. + +### Notes + +- The adapter treats LangChain `tool` messages as `assistant` during compression. +- `@langchain/core` is an optional peer dependency; only needed if you use the adapter. + +## [2.0.0] - 2025-08-24 ### Breaking diff --git a/README.md b/README.md index 0f8e624..735ac96 100644 --- a/README.md +++ b/README.md @@ -7,13 +7,14 @@ Lightweight, model-agnostic chat history compression utilities for AI assistants ## Examples - OpenAI: see `examples/OPENAI_EXAMPLE.md` (copy-paste snippet; BYOM, no deps added here). -- LangChain: see `examples/LANGCHAIN_EXAMPLE.md` (adapts a LangChain chat model to `SlimContextChatModel`). 
+- LangChain: see `examples/LANGCHAIN_EXAMPLE.md` and `examples/LANGCHAIN_COMPRESS_HISTORY.md`. ## Features - Trim strategy: keep the first (system) message and last N messages. - Summarize strategy: summarize the middle portion using your own chat model. - Framework agnostic: plug in any model wrapper implementing a minimal `invoke()` interface. +- Optional LangChain adapter with a one-call helper for compressing histories. ## Installation @@ -113,8 +114,48 @@ if (history.length > 50) { ## Example Integration -See `examples/LANGCHAIN_EXAMPLE.md` for a LangChain-style example. -See `examples/OPENAI_EXAMPLE.md` for an OpenAI example (copy-paste snippet). +- See `examples/OPENAI_EXAMPLE.md` for an OpenAI copy-paste snippet. +- See `examples/LANGCHAIN_EXAMPLE.md` for a LangChain-style integration. +- See `examples/LANGCHAIN_COMPRESS_HISTORY.md` for a one-call LangChain history compression helper. + +## Adapters + +### LangChain + +If you already use LangChain chat models, you can use the built-in adapter. It’s exported in two ways: + +- Namespaced: `import { langchain } from 'slimcontext'` +- Direct path: `import * as langchain from 'slimcontext/adapters/langchain'` + +Common helpers: + +- `compressLangChainHistory(history, options)` – one-call compression for LangChain `BaseMessage[]`. +- `toSlimModel(llm)` – wrap a LangChain `BaseChatModel` for `SummarizeCompressor`. 
+ +Example (one-call history compression): + +```ts +import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; +import { ChatOpenAI } from '@langchain/openai'; +import { langchain } from 'slimcontext'; + +const lc = new ChatOpenAI({ model: 'gpt-4o-mini', temperature: 0 }); + +const history = [ + new SystemMessage('You are helpful.'), + new HumanMessage('Please summarize the discussion so far.'), + new AIMessage('Certainly!'), + // ...more messages +]; + +const compact = await langchain.compressLangChainHistory(history, { + strategy: 'summarize', + llm: lc, // BaseChatModel + maxMessages: 12, +}); +``` + +See `examples/LANGCHAIN_COMPRESS_HISTORY.md` for a fuller copy-paste example. ## API diff --git a/examples/LANGCHAIN_COMPRESS_HISTORY.md b/examples/LANGCHAIN_COMPRESS_HISTORY.md new file mode 100644 index 0000000..ab5482b --- /dev/null +++ b/examples/LANGCHAIN_COMPRESS_HISTORY.md @@ -0,0 +1,42 @@ +# LangChain one-call history compression + +This example shows how to use `compressLangChainHistory` to compress a LangChain message history in a single call. + +```ts +import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; +import { ChatOpenAI } from '@langchain/openai'; +import { langchain } from 'slimcontext'; + +// 1) Create your LangChain chat model (any BaseChatModel works) +const llm = new ChatOpenAI({ model: 'gpt-4o-mini', temperature: 0 }); + +// 2) Build your existing LangChain-compatible history +const history = [ + new SystemMessage('You are a helpful assistant.'), + new HumanMessage('Hi! Help me plan a 3-day trip to Tokyo.'), + new AIMessage('Sure, what are your interests?'), + // ... 
many more messages
+];
+
+// 3) Compress with either summarize (default) or trim strategy
+const compact = await langchain.compressLangChainHistory(history, {
+  strategy: 'summarize',
+  llm, // pass your BaseChatModel
+  maxMessages: 12, // target total messages after compression (system + summary + recent)
+});
+
+// Alternatively, use trimming without an LLM:
+const trimmed = await langchain.compressLangChainHistory(history, {
+  strategy: 'trim',
+  messagesToKeep: 8,
+});
+
+console.log('Original size:', history.length);
+console.log('Summarized size:', compact.length);
+console.log('Trimmed size:', trimmed.length);
+```
+
+Notes
+
+- `@langchain/core` is an optional peer dependency. Install it only if you use the adapter.
+- `maxMessages` must be at least 4 for summarize (system + summary + 2 recent).
diff --git a/package.json b/package.json
index 1fd3752..000928b 100644
--- a/package.json
+++ b/package.json
@@ -1,9 +1,13 @@
 {
   "name": "slimcontext",
-  "version": "2.0.1",
+  "version": "2.1.0",
   "description": "Lightweight, model-agnostic chat history compression (trim + summarize) for AI assistants.",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
+  "exports": {
+    ".": { "types": "./dist/index.d.ts", "default": "./dist/index.js" },
+    "./adapters/langchain": { "types": "./dist/adapters/langchain.d.ts", "default": "./dist/adapters/langchain.js" }
+  },
   "files": [
     "dist"
   ],
@@ -37,6 +41,14 @@
   },
   "homepage": "https://github.com/Agentailor/slimcontext#readme",
   "packageManager": "pnpm@10.14.0",
+  "peerDependencies": {
+    "@langchain/core": ">=0.3.71 <1"
+  },
+  "peerDependenciesMeta": {
+    "@langchain/core": {
+      "optional": true
+    }
+  },
   "devDependencies": {
     "eslint": "^8.57.0",
     "@types/node": "^24.3.0",
@@ -47,6 +59,7 @@
     "eslint-plugin-unused-imports": "^4.2.0",
     "prettier": "^3.6.2",
     "typescript": "^5.9.2",
-    "vitest": "^3.2.4"
+    "vitest": "^3.2.4",
+    "@langchain/core": "^0.3.71"
   }
 }
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 8c18556..230f9a2 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -7,6 +7,9 @@ settings:
 
 importers:
 
   .:
     devDependencies:
+      
'@langchain/core': + specifier: ^0.3.71 + version: 0.3.72 '@types/node': specifier: ^24.3.0 version: 24.3.0 @@ -39,6 +42,12 @@ importers: version: 3.2.4(@types/node@24.3.0) packages: + '@cfworker/json-schema@4.1.1': + resolution: + { + integrity: sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==, + } + '@esbuild/aix-ppc64@0.21.5': resolution: { @@ -304,6 +313,13 @@ packages: integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==, } + '@langchain/core@0.3.72': + resolution: + { + integrity: sha512-WsGWVZYnlKffj2eEfDocPNiaTRoxyYiLSQdQ7oxZvxGZBqo/90vpjbC33UGK1uPNBM4kT+pkdaol/MnvKUh8TQ==, + } + engines: { node: '>=18' } + '@nodelib/fs.scandir@2.1.5': resolution: { @@ -521,6 +537,18 @@ packages: integrity: sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow==, } + '@types/retry@0.12.0': + resolution: + { + integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==, + } + + '@types/uuid@10.0.0': + resolution: + { + integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==, + } + '@typescript-eslint/eslint-plugin@8.40.0': resolution: { @@ -702,6 +730,13 @@ packages: } engines: { node: '>=8' } + ansi-styles@5.2.0: + resolution: + { + integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==, + } + engines: { node: '>=10' } + argparse@2.0.1: resolution: { @@ -777,6 +812,12 @@ packages: integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==, } + base64-js@1.5.1: + resolution: + { + integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==, + } + brace-expansion@1.1.12: resolution: { @@ -831,6 +872,13 @@ packages: } engines: { node: '>=6' } + camelcase@6.3.0: + resolution: + { + integrity: 
sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==, + } + engines: { node: '>=10' } + chai@5.3.1: resolution: { @@ -871,6 +919,12 @@ packages: integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==, } + console-table-printer@2.14.6: + resolution: + { + integrity: sha512-MCBl5HNVaFuuHW6FGbL/4fB7N/ormCy+tQ+sxTrF6QtSbSNETvPuOVbkJBhzDgYhvjWGrTma4eYJa37ZuoQsPw==, + } + cross-spawn@7.0.6: resolution: { @@ -922,6 +976,13 @@ packages: supports-color: optional: true + decamelize@1.2.0: + resolution: + { + integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==, + } + engines: { node: '>=0.10.0' } + deep-eql@5.0.2: resolution: { @@ -1175,6 +1236,12 @@ packages: } engines: { node: '>=0.10.0' } + eventemitter3@4.0.7: + resolution: + { + integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==, + } + expect-type@1.2.2: resolution: { @@ -1658,6 +1725,12 @@ packages: integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==, } + js-tiktoken@1.0.21: + resolution: + { + integrity: sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==, + } + js-tokens@9.0.1: resolution: { @@ -1702,6 +1775,26 @@ packages: integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==, } + langsmith@0.3.64: + resolution: + { + integrity: sha512-67yccwrREK5pgVX7PktkUImbNT4bQpSPH7SETSPYZ+X42OpGGAABU4s+pNV/55/ke+INrmpXXlKxfMpZOCrzQA==, + } + peerDependencies: + '@opentelemetry/api': '*' + '@opentelemetry/exporter-trace-otlp-proto': '*' + '@opentelemetry/sdk-trace-base': '*' + openai: '*' + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@opentelemetry/exporter-trace-otlp-proto': + optional: true + '@opentelemetry/sdk-trace-base': + optional: true + openai: + 
optional: true + levn@0.4.1: resolution: { @@ -1780,6 +1873,13 @@ packages: integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==, } + mustache@4.2.0: + resolution: + { + integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==, + } + hasBin: true + nanoid@3.3.11: resolution: { @@ -1856,6 +1956,13 @@ packages: } engines: { node: '>= 0.4' } + p-finally@1.0.0: + resolution: + { + integrity: sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==, + } + engines: { node: '>=4' } + p-limit@3.1.0: resolution: { @@ -1870,6 +1977,27 @@ packages: } engines: { node: '>=10' } + p-queue@6.6.2: + resolution: + { + integrity: sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==, + } + engines: { node: '>=8' } + + p-retry@4.6.2: + resolution: + { + integrity: sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==, + } + engines: { node: '>=8' } + + p-timeout@3.2.0: + resolution: + { + integrity: sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==, + } + engines: { node: '>=8' } + parent-module@1.0.1: resolution: { @@ -2008,6 +2136,13 @@ packages: engines: { node: '>= 0.4' } hasBin: true + retry@0.13.1: + resolution: + { + integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==, + } + engines: { node: '>= 4' } + reusify@1.1.0: resolution: { @@ -2142,6 +2277,12 @@ packages: integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==, } + simple-wcswidth@1.1.2: + resolution: + { + integrity: sha512-j7piyCjAeTDSjzTSQ7DokZtMNwNlEAyxqSZeCS+CXH7fJ4jx3FuJ/mTW3mE+6JLs4VJBbcll0Kjn+KXI5t21Iw==, + } + source-map-js@1.2.1: resolution: { @@ -2367,6 +2508,13 @@ packages: integrity: 
sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==, } + uuid@10.0.0: + resolution: + { + integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==, + } + hasBin: true + vite-node@3.2.4: resolution: { @@ -2504,7 +2652,23 @@ packages: } engines: { node: '>=10' } + zod-to-json-schema@3.24.6: + resolution: + { + integrity: sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg==, + } + peerDependencies: + zod: ^3.24.1 + + zod@3.25.76: + resolution: + { + integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==, + } + snapshots: + '@cfworker/json-schema@4.1.1': {} + '@esbuild/aix-ppc64@0.21.5': optional: true @@ -2611,6 +2775,26 @@ snapshots: '@jridgewell/sourcemap-codec@1.5.5': {} + '@langchain/core@0.3.72': + dependencies: + '@cfworker/json-schema': 4.1.1 + ansi-styles: 5.2.0 + camelcase: 6.3.0 + decamelize: 1.2.0 + js-tiktoken: 1.0.21 + langsmith: 0.3.64 + mustache: 4.2.0 + p-queue: 6.6.2 + p-retry: 4.6.2 + uuid: 10.0.0 + zod: 3.25.76 + zod-to-json-schema: 3.24.6(zod@3.25.76) + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + '@nodelib/fs.scandir@2.1.5': dependencies: '@nodelib/fs.stat': 2.0.5 @@ -2699,6 +2883,10 @@ snapshots: dependencies: undici-types: 7.10.0 + '@types/retry@0.12.0': {} + + '@types/uuid@10.0.0': {} + '@typescript-eslint/eslint-plugin@8.40.0(@typescript-eslint/parser@8.40.0(eslint@8.57.1)(typescript@5.9.2))(eslint@8.57.1)(typescript@5.9.2)': dependencies: '@eslint-community/regexpp': 4.12.1 @@ -2855,6 +3043,8 @@ snapshots: dependencies: color-convert: 2.0.1 + ansi-styles@5.2.0: {} + argparse@2.0.1: {} array-buffer-byte-length@1.0.2: @@ -2917,6 +3107,8 @@ snapshots: balanced-match@1.0.2: {} + base64-js@1.5.1: {} + brace-expansion@1.1.12: dependencies: balanced-match: 1.0.2 
@@ -2951,6 +3143,8 @@ snapshots: callsites@3.1.0: {} + camelcase@6.3.0: {} + chai@5.3.1: dependencies: assertion-error: 2.0.1 @@ -2974,6 +3168,10 @@ snapshots: concat-map@0.0.1: {} + console-table-printer@2.14.6: + dependencies: + simple-wcswidth: 1.1.2 + cross-spawn@7.0.6: dependencies: path-key: 3.1.1 @@ -3006,6 +3204,8 @@ snapshots: dependencies: ms: 2.1.3 + decamelize@1.2.0: {} + deep-eql@5.0.2: {} deep-is@0.1.4: {} @@ -3279,6 +3479,8 @@ snapshots: esutils@2.0.3: {} + eventemitter3@4.0.7: {} + expect-type@1.2.2: {} fast-deep-equal@3.1.3: {} @@ -3559,6 +3761,10 @@ snapshots: isexe@2.0.0: {} + js-tiktoken@1.0.21: + dependencies: + base64-js: 1.5.1 + js-tokens@9.0.1: {} js-yaml@4.1.0: @@ -3579,6 +3785,16 @@ snapshots: dependencies: json-buffer: 3.0.1 + langsmith@0.3.64: + dependencies: + '@types/uuid': 10.0.0 + chalk: 4.1.2 + console-table-printer: 2.14.6 + p-queue: 6.6.2 + p-retry: 4.6.2 + semver: 7.7.2 + uuid: 10.0.0 + levn@0.4.1: dependencies: prelude-ls: 1.2.1 @@ -3617,6 +3833,8 @@ snapshots: ms@2.1.3: {} + mustache@4.2.0: {} + nanoid@3.3.11: {} natural-compare@1.4.0: {} @@ -3673,6 +3891,8 @@ snapshots: object-keys: 1.1.1 safe-push-apply: 1.0.0 + p-finally@1.0.0: {} + p-limit@3.1.0: dependencies: yocto-queue: 0.1.0 @@ -3681,6 +3901,20 @@ snapshots: dependencies: p-limit: 3.1.0 + p-queue@6.6.2: + dependencies: + eventemitter3: 4.0.7 + p-timeout: 3.2.0 + + p-retry@4.6.2: + dependencies: + '@types/retry': 0.12.0 + retry: 0.13.1 + + p-timeout@3.2.0: + dependencies: + p-finally: 1.0.0 + parent-module@1.0.1: dependencies: callsites: 3.1.0 @@ -3747,6 +3981,8 @@ snapshots: path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 + retry@0.13.1: {} + reusify@1.1.0: {} rimraf@3.0.2: @@ -3864,6 +4100,8 @@ snapshots: siginfo@2.0.0: {} + simple-wcswidth@1.1.2: {} + source-map-js@1.2.1: {} stackback@0.0.2: {} @@ -4002,6 +4240,8 @@ snapshots: dependencies: punycode: 2.3.1 + uuid@10.0.0: {} + vite-node@3.2.4(@types/node@24.3.0): dependencies: cac: 6.7.14 @@ -4122,3 +4362,9 @@ 
snapshots: wrappy@1.0.2: {} yocto-queue@0.1.0: {} + + zod-to-json-schema@3.24.6(zod@3.25.76): + dependencies: + zod: 3.25.76 + + zod@3.25.76: {} diff --git a/src/adapters/langchain.ts b/src/adapters/langchain.ts new file mode 100644 index 0000000..0d3c4c6 --- /dev/null +++ b/src/adapters/langchain.ts @@ -0,0 +1,183 @@ +import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; +import { + AIMessage, + BaseMessage, + HumanMessage, + isAIMessage, + SystemMessage, + isHumanMessage, + isSystemMessage, + isToolMessage, +} from '@langchain/core/messages'; + +import { + SummarizeCompressor, + type SummarizeConfig, + TrimCompressor, + type TrimConfig, + type SlimContextChatModel, + type SlimContextCompressor, + type SlimContextMessage, + type SlimContextModelResponse, +} from '..'; + +/** + * Extract plain text from a LangChain message content, which may be a string + * or an array of content parts that could include text entries. + * Only text content is extracted; other content types (such as images or non-text parts) are ignored. + */ +export function extractContent(content: unknown): string { + if (typeof content === 'string') return content.trim(); + if (Array.isArray(content)) { + const parts = content + .map((c) => { + if (typeof c === 'string') return c; + if (typeof c === 'object' && c !== null && 'text' in c) { + const v = (c as { text?: unknown }).text; + return typeof v === 'string' ? v : ''; + } + return ''; + }) + .filter(Boolean); + return parts.join('\n').trim(); + } + return ''; +} + +/** + * Normalize LangChain BaseMessage.getType() (e.g., 'ai', 'human', or 'AIMessage', 'HumanMessage') + * to a SlimContext role. 
+ */ +export function roleFromMessageType(type: string): 'user' | 'assistant' | 'system' | 'tool' { + switch (type) { + case 'ai': + return 'assistant'; + case 'system': + return 'system'; + case 'tool': + return 'tool'; + case 'human': + default: + return 'user'; + } +} + +/** + * Determine a LangChain BaseMessage type + * @param msg + * @returns + */ +export function getLangChainMessageType(msg: BaseMessage): string { + if (isAIMessage(msg)) return 'ai'; + if (isHumanMessage(msg)) return 'human'; + if (isSystemMessage(msg)) return 'system'; + if (isToolMessage(msg)) return 'tool'; + + return 'human'; +} + +/** Convert a LangChain BaseMessage to a SlimContextMessage used by compression. */ +export function baseToSlim(msg: BaseMessage): SlimContextMessage { + const type = getLangChainMessageType(msg); + return { + role: roleFromMessageType(type), + content: extractContent(msg.content as unknown), + }; +} + +/** Map SlimContextMessage back to a LangChain BaseMessage class. */ +export function slimToLangChain(msg: SlimContextMessage): BaseMessage { + switch (msg.role) { + case 'assistant': + return new AIMessage(msg.content); + case 'system': + return new SystemMessage(msg.content); + case 'user': + case 'human': + default: + return new HumanMessage(msg.content); + } +} + +/** Preprocess LangChain messages into SlimContext messages. */ +export function preprocessToSlim(messages: BaseMessage[]): SlimContextMessage[] { + return messages.map(baseToSlim); +} + +/** Postprocess SlimContext messages back into LangChain messages. */ +export function postprocessToLangChain(messages: SlimContextMessage[]): BaseMessage[] { + return messages.map(slimToLangChain); +} + +/** + * Lightweight adapter to bridge a LangChain BaseChatModel to SlimContext's model interface. 
+ */ +export class LangChainSlimModel implements SlimContextChatModel { + private llm: BaseChatModel; + + constructor(llm: BaseChatModel) { + this.llm = llm; + } + + async invoke(messages: SlimContextMessage[]): Promise { + // Convert to LangChain BaseMessage instances + const lcMessages = messages.map(slimToLangChain); + const res = await this.llm.invoke(lcMessages); + const content = extractContent((res as unknown as { content?: unknown })?.content); + return { content }; + } +} + +/** Create a SlimContextChatModel from a LangChain BaseChatModel. */ +export function toSlimModel(llm: BaseChatModel): SlimContextChatModel { + return new LangChainSlimModel(llm); +} + +/** Convenience: build a SummarizeCompressor for LangChain models. */ +export function createSummarizeCompressorForLangChain( + llm: BaseChatModel, + config: Omit, +): SummarizeCompressor { + return new SummarizeCompressor({ model: toSlimModel(llm), ...config }); +} + +/** Convenience: build a TrimCompressor. */ +export function createTrimCompressor(config: TrimConfig): TrimCompressor { + return new TrimCompressor(config); +} + +export type CompressLangChainOptions = + | { compressor: SlimContextCompressor } + | ({ + strategy?: 'summarize'; + llm: BaseChatModel; + } & Omit) + | ({ strategy: 'trim' } & TrimConfig); + +/** + * High-level helper: compress a LangChain message history in one call. + * - Converts LC -> SlimContext, runs a compressor, and converts the result back. 
+ */ +export async function compressLangChainHistory( + history: BaseMessage[], + options: CompressLangChainOptions, +): Promise { + const slimHistory = preprocessToSlim(history); + + let compressor: SlimContextCompressor; + + if ('compressor' in options) { + compressor = options.compressor; + } else if (options.strategy === 'trim') { + compressor = new TrimCompressor(options); + } else { + const { llm, ...summarizeOptions } = options; + compressor = new SummarizeCompressor({ + model: toSlimModel(llm), + ...summarizeOptions, + }); + } + + const compressed = await compressor.compress(slimHistory); + return postprocessToLangChain(compressed); +} diff --git a/src/index.ts b/src/index.ts index bbb0121..4ae4521 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,3 +1,4 @@ export * from './interfaces'; export * from './strategies/trim'; export * from './strategies/summarize'; +export * as langchain from './adapters/langchain'; diff --git a/src/strategies/trim.ts b/src/strategies/trim.ts index e9cad56..f1462cb 100644 --- a/src/strategies/trim.ts +++ b/src/strategies/trim.ts @@ -1,12 +1,19 @@ import { SlimContextCompressor, SlimContextMessage } from '../interfaces'; +/** + * Trim configuration options for the TrimCompressor (messages to keep). + */ +export interface TrimConfig { + messagesToKeep: number; +} + /** * TrimCompressor keeps the very first message (often a system prompt) and the last N-1 messages. 
*/ export class TrimCompressor implements SlimContextCompressor { private readonly messagesToKeep: number; - constructor(config: { messagesToKeep: number }) { + constructor(config: TrimConfig) { if (config.messagesToKeep < 2) { throw new Error( 'messagesToKeep must be at least 2 to retain the first system message and one recent message', diff --git a/tests/langchain.test.ts b/tests/langchain.test.ts new file mode 100644 index 0000000..8821d17 --- /dev/null +++ b/tests/langchain.test.ts @@ -0,0 +1,139 @@ +import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; +import { AIMessage, BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; +import type { MessageContentComplex } from '@langchain/core/messages'; +import { Runnable, type RunnableConfig } from '@langchain/core/runnables'; +import { describe, it, expect, vi } from 'vitest'; + +import { + baseToSlim, + compressLangChainHistory, + extractContent, + roleFromMessageType, + slimToLangChain, +} from '../src/adapters/langchain'; +import type { SlimContextMessage } from '../src/interfaces'; + +// Mock BaseChatModel for testing +const createMockChatModel = (responseText: string): BaseChatModel => { + class MockRunnable extends Runnable { + lc_namespace = ['langchain', 'test', 'mock']; + + invoke( + _input: BaseMessage[], + _options?: Partial | undefined, + ): Promise { + return Promise.resolve(new AIMessage(responseText)); + } + } + const model = new MockRunnable(); + return model as unknown as BaseChatModel; +}; + +describe('LangChain Adapter', () => { + describe('extractContent', () => { + it('should extract string content', () => { + expect(extractContent(' hello world ')).toBe('hello world'); + }); + + it('should extract content from a complex array', () => { + const content: MessageContentComplex[] = [ + { type: 'text', text: 'Hello' }, + { type: 'image_url', image_url: 'some_url' }, // Should be ignored + ]; + expect(extractContent([' world', 
...content])).toBe('world\nHello'); + }); + + it('should return an empty string for unsupported types', () => { + expect(extractContent(123)).toBe(''); + expect(extractContent(null)).toBe(''); + expect(extractContent({})).toBe(''); + }); + }); + + describe('roleFromMessageType', () => { + it('should map message types to roles correctly', () => { + expect(roleFromMessageType('ai')).toBe('assistant'); + expect(roleFromMessageType('human')).toBe('user'); + expect(roleFromMessageType('system')).toBe('system'); + expect(roleFromMessageType('tool')).toBe('tool'); + expect(roleFromMessageType('generic')).toBe('user'); + }); + }); + + describe('Message Conversion', () => { + it('should convert BaseMessage to SlimContextMessage', () => { + const humanMessage = new HumanMessage('Hello'); + const slimMessage = baseToSlim(humanMessage); + expect(slimMessage).toEqual({ role: 'user', content: 'Hello' }); + }); + + it('should convert SlimContextMessage back to BaseMessage', () => { + const slimMessage: SlimContextMessage = { role: 'assistant', content: 'Hi there' }; + const baseMessage = slimToLangChain(slimMessage); + expect(baseMessage).toBeInstanceOf(AIMessage); + expect(baseMessage.content).toBe('Hi there'); + }); + }); + + describe('compressLangChainHistory', () => { + const history: BaseMessage[] = [ + new SystemMessage('You are a helpful assistant.'), + new HumanMessage('Message 1'), + new AIMessage('Message 2'), + new HumanMessage('Message 3'), + new AIMessage('Message 4'), + ]; + + it('should compress history with the trim strategy', async () => { + const compressed = await compressLangChainHistory(history, { + strategy: 'trim', + messagesToKeep: 3, + }); + + expect(compressed).toHaveLength(3); // System + 2 kept + expect(compressed[0]).toBeInstanceOf(SystemMessage); + expect(compressed[1]).toBeInstanceOf(HumanMessage); + expect(compressed[1].content).toBe('Message 3'); + expect(compressed[2]).toBeInstanceOf(AIMessage); + expect(compressed[2].content).toBe('Message 4'); 
+    });
+
+    it('should compress history with the summarize strategy', async () => {
+      const mockModel = createMockChatModel('This is a summary of messages 1 and 2.');
+      const invokeSpy = vi.spyOn(mockModel, 'invoke');
+
+      const compressed = await compressLangChainHistory(history, {
+        strategy: 'summarize',
+        llm: mockModel,
+        maxMessages: 4,
+      });
+
+      expect(invokeSpy).toHaveBeenCalled();
+      expect(compressed).toHaveLength(4); // System + summary + 2 kept
+      expect(compressed[0]).toBeInstanceOf(SystemMessage);
+      expect(compressed[1]).toBeInstanceOf(SystemMessage); // Summary is a SystemMessage
+      expect(compressed[1].content).toContain('This is a summary of messages 1 and 2.');
+      expect(compressed[2].content).toBe('Message 3');
+      expect(compressed[3].content).toBe('Message 4');
+    });
+
+    it('should work with a pre-created compressor', async () => {
+      const customCompressor = {
+        compress: async (messages: SlimContextMessage[]) => {
+          return Promise.resolve([
+            { role: 'assistant', content: 'Custom summary' },
+            ...messages.slice(-1),
+          ] as SlimContextMessage[]);
+        },
+      };
+
+      const compressed = await compressLangChainHistory(history, {
+        compressor: customCompressor,
+      });
+
+      expect(compressed).toHaveLength(2);
+      expect(compressed[0].content).toBe('Custom summary');
+      expect(compressed[1].content).toBe('Message 4');
+    });
+  });
+});
diff --git a/tsconfig.json b/tsconfig.json
index 48c4fd7..3cf66f6 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -1,6 +1,6 @@
 {
   "compilerOptions": {
-    "target": "ES2019",
+    "target": "ES2020",
     "module": "CommonJS",
     "declaration": true,
     "outDir": "dist",