17 changes: 13 additions & 4 deletions README.md
@@ -32,6 +32,10 @@ See this simple [guide](#example-create-custom-roles-to-interact-with-openrouter

🚨 **Announcement** 🚨

Basic MCP support is now available via the [vim-ai-provider-openai-mcp](https://github.com/kracejic/vim-ai-provider-openai-mcp) plugin.

AIChat is now asynchronous: you can keep using Vim while the completion runs in the background, and you can even have multiple chat completions running in parallel in one Vim instance. Enable it with `let g:vim_ai_async_chat = 1`. Tips: use `options.streaming=1` to see the results as they arrive, and use `:AIStopChat` to stop a completion.
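
A minimal vimrc sketch of the settings mentioned above, assuming vim-ai's usual `g:vim_ai_chat` options dictionary (the `options.streaming` key name is taken from this announcement; check `:help vim-ai` for the exact layout):

```vim
" enable asynchronous :AIChat (disabled by default)
let g:vim_ai_async_chat = 1

" stream partial results into the chat buffer as they arrive
let g:vim_ai_chat = {'options': {'streaming': 1}}
```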

`vim-ai` can now be extended with custom provider plugins.
However, there aren't many available yet, so developing new ones is welcome!
For more, see the [providers](#providers) section.
@@ -99,10 +103,11 @@ To use an AI command, type the command followed by an instruction prompt. You ca
```
========== Basic AI commands ==========

:AI complete text
:AIEdit edit text
:AIChat continue or open new chat
:AIImage generate image
:AI complete text
:AIEdit edit text
:AIChat continue or open new chat
:AIStopChat stop the running AI chat completion
:AIImage generate image

============== Utilities ==============

@@ -134,6 +139,7 @@ This is the list of 3rd party provider plugins allowing to use different AI prov

- [google provider](https://github.com/madox2/vim-ai-provider-google) - Google's Gemini models
- [OpenAI Responses API Provider](https://github.com/kevincojean/vim-ai-provider-openai-responses) - OpenAI Responses API compatibility plug-in
- [OpenAI Provider with MCP support](https://github.com/kracejic/vim-ai-provider-openai-mcp) - OpenAI-compatible provider with basic MCP support

In case you are interested in developing one, have a look at the reference [google provider](https://github.com/madox2/vim-ai-provider-google).
Do not forget to open a PR updating this list.
@@ -489,6 +495,9 @@ let g:vim_ai_token_file_path = "~/.config/openai.token"
" custom fn to load token, e.g. "g:GetAIToken()"
let g:vim_ai_token_load_fn = ""

" enable asynchronous AIChat (disabled by default)
let g:vim_ai_async_chat = 1

" enables/disables full markdown highlighting in aichat files
" NOTE: code syntax highlighting works out of the box without this option enabled
" NOTE: highlighting may be corrupted when using together with the `preservim/vim-markdown`
64 changes: 63 additions & 1 deletion autoload/vim_ai.vim
@@ -308,15 +308,77 @@ function! vim_ai#AIChatRun(uses_range, config, ...) range abort
call s:set_paste(l:config)
call s:ReuseOrCreateChatWindow(l:config)

let l:context['bufnr'] = bufnr()
let l:bufnr = bufnr()

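" refuse to start while an async job for this buffer is still running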
if py3eval("ai_job_pool.is_job_done(unwrap('l:bufnr'))") == 0
echoerr "Operation in progress, wait or stop it with :AIStopChat"
return
endif

let s:last_command = "chat"
let s:last_config = a:config

py3 run_ai_chat(unwrap('l:context'))
if py3eval("run_ai_chat(unwrap('l:context'))")
if g:vim_ai_async_chat == 1
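" seed a blank line and a spinner placeholder; the watcher timer below
" keeps replacing them until the job is done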
call appendbufline(l:bufnr, '$', "")
call appendbufline(l:bufnr, '$', "<<< thinking -")
call timer_start(1000, function('vim_ai#AIChatWatch', [l:bufnr, 0]))
endif
endif
finally
call s:set_nopaste(l:config)
endtry
endfunction

" Stop current chat job
function! vim_ai#AIChatStopRun() abort
if &filetype !=# 'aichat'
echoerr "Not in an AI chat buffer."
return
endif
let l:bufnr = bufnr('%')
call s:ImportPythonModules() " Ensure chat.py is loaded
py3 ai_job_pool.cancel_job(unwrap('l:bufnr'))
endfunction


" Called from a timer: checks whether the AI produced new lines and
" appends them to the buffer. It ends once the AI thread has finished
" (or has been stopped).
function! vim_ai#AIChatWatch(bufnr, anim, timerid) abort
" inject new lines; check whether the job is done first to avoid a data
" race. Running the timer one extra time is fine, but we must not lose
" any data.
let l:done = py3eval("ai_job_pool.is_job_done(unwrap('a:bufnr'))")
let l:result = py3eval("ai_job_pool.pickup_lines(unwrap('a:bufnr'))")
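" drop the two placeholder lines (blank + spinner) before appending output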
call deletebufline(a:bufnr, '$')
call deletebufline(a:bufnr, '$')
call appendbufline(a:bufnr, '$', l:result)

" if not done, queue timer and animate
if l:done == 0
if a:anim == 0
call timer_start(1000, function('vim_ai#AIChatWatch', [a:bufnr, 1]))
call appendbufline(a:bufnr, '$', "")
call appendbufline(a:bufnr, '$', "<<< thinking /")
else
call timer_start(1000, function('vim_ai#AIChatWatch', [a:bufnr, 0]))
call appendbufline(a:bufnr, '$', "")
call appendbufline(a:bufnr, '$', "<<< thinking \\")
endif
else
" Clear message
" https://neovim.discourse.group/t/how-to-clear-the-echo-message-in-the-command-line/268/3
call feedkeys(':','nx')
end

" if window is visible, scroll down
let winid = bufwinid(a:bufnr)
if winid != -1
call win_execute(winid, "normal! G")
endif
endfunction

" Start a new chat
" a:1 - optional preset shortcut (below, right, tab)
function! vim_ai#AINewChatDeprecatedRun(...)
3 changes: 3 additions & 0 deletions autoload/vim_ai_config.vim
@@ -133,6 +133,9 @@ endif
if !exists("g:vim_ai_roles_config_file")
let g:vim_ai_roles_config_file = s:plugin_root . "/roles-example.ini"
endif
if !exists("g:vim_ai_async_chat")
let g:vim_ai_async_chat = 0
endif

function! vim_ai_config#ExtendDeep(defaults, override) abort
let l:result = a:defaults
2 changes: 1 addition & 1 deletion doc/tags
@@ -6,7 +6,7 @@
:AIUtilDebugOff vim-ai.txt /*:AIUtilDebugOff*
:AIUtilDebugOn vim-ai.txt /*:AIUtilDebugOn*
:AIUtilOpenRoles vim-ai.txt /*:AIUtilOpenRoles*
g:aichat_markdown vim-ai.txt /*g:aichat_markdown*
g:vim_ai_chat_markdown vim-ai.txt /*g:vim_ai_chat_markdown*
vim-ai vim-ai.txt /*vim-ai*
vim-ai-about vim-ai.txt /*vim-ai-about*
vim-ai-commands vim-ai.txt /*vim-ai-commands*
7 changes: 7 additions & 0 deletions doc/vim-ai.txt
@@ -137,6 +137,13 @@ Options: >
Check OpenAI docs for more information:
https://platform.openai.com/docs/api-reference/chat

*:AIStopChat*
AIStopChat Cancel the currently running AI chat
generation for the active chat buffer.
If no task is running or if it has already
completed, this command has no effect.
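
For convenience you can map the command in your vimrc, for example
(the key is your choice): >
    nnoremap <leader>s :AIStopChat<CR>
<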


INCLUDE FILES *vim-ai-include*

To include files in the chat a special `include` role is used: >
1 change: 1 addition & 0 deletions plugin/vim-ai.vim
@@ -17,6 +17,7 @@ command! -range -nargs=? -complete=customlist,vim_ai#RoleCompletionChat AIChat <
command! -range -nargs=? -complete=customlist,vim_ai#RoleCompletionImage AIImage <line1>,<line2>call vim_ai#AIImageRun(<range>, {}, <q-args>)
command! -nargs=? AINewChat call vim_ai#AINewChatDeprecatedRun(<f-args>)
command! AIRedo call vim_ai#AIRedoRun()
command! AIStopChat call vim_ai#AIChatStopRun()
command! AIUtilRolesOpen call vim_ai#AIUtilRolesOpen()
command! AIUtilDebugOn call vim_ai#AIUtilSetDebug(1)
command! AIUtilDebugOff call vim_ai#AIUtilSetDebug(0)
176 changes: 155 additions & 21 deletions py/chat.py
@@ -1,4 +1,9 @@
import vim
import threading
import time
import copy
import json
import traceback

chat_py_imported = True

@@ -61,7 +66,7 @@ def initialize_chat_window():
vim.command("redraw")

last_role = re.match(r".*^(>>>|<<<) (\w+)", file_content, flags=re.DOTALL | re.MULTILINE)
if last_role and last_role.group(2) not in ('user', 'include', 'exec'):
if last_role and last_role.group(2) not in ('user', 'include', 'exec', 'tool_call', 'tool_response', 'info'):
# last role is not a user role, most likely completion was cancelled before
vim.command("normal! o")
vim.command("normal! i\n>>> user\n\n")
@@ -93,33 +98,162 @@ def initialize_chat_window():

# if empty :AIC has been called outside of the chat, just init/switch to the chat but don't trigger the request (#147)
should_imediately_answer = prompt or started_from_chat
awaiting_response = last_content['type'] != 'text' or last_content['text']
awaiting_response = last_content['type'] != 'text' or last_content['text'] or "tool_calls" in messages[-1]
if awaiting_response and should_imediately_answer:
vim.command("redraw")

print('Answering...')
vim.command("redraw")
provider_class = load_provider(provider)
provider = provider_class(command_type, options, ai_provider_utils)
response_chunks = provider.request(messages)

def _chunks_to_sections(chunks):
first_thinking_chunk = True
first_content_chunk = True
for chunk in chunks:
if chunk['type'] == 'thinking' and first_thinking_chunk:
first_thinking_chunk = False
vim.command("normal! Go\n<<< thinking\n\n")
if chunk['type'] == 'assistant' and first_content_chunk:
first_content_chunk = False
vim.command("normal! Go\n<<< assistant\n\n")
yield chunk['content']

render_text_chunks(_chunks_to_sections(response_chunks), append_to_eol=True)

vim.command("normal! a\n\n>>> user\n\n")
vim.command("redraw")
clear_echo_message()

if vim.eval("g:vim_ai_async_chat") == "1":
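# async path: run the provider request in a worker thread; the VimL
# watcher timer collects finished lines via ai_job_pool.pickup_lines()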
ai_job_pool.new_job(context, messages, provider)
else:
response_chunks = provider.request(messages)

def _chunks_to_sections(chunks):
first_thinking_chunk = True
first_content_chunk = True
for chunk in chunks:
if chunk['type'] == 'thinking' and first_thinking_chunk:
first_thinking_chunk = False
vim.command("normal! Go\n<<< thinking\n\n")
if chunk['type'] == 'assistant' and first_content_chunk:
first_content_chunk = False
vim.command("normal! Go\n<<< assistant\n\n")
yield chunk['content']

render_text_chunks(_chunks_to_sections(response_chunks), append_to_eol=True)

vim.command("normal! a\n\n>>> user\n\n")
vim.command("redraw")
clear_echo_message()

return True
else:
return False
except BaseException as error:
handle_completion_error(provider, error)
print_debug("[{}] error: {}", command_type, traceback.format_exc())


# wraps a single AI chat job in a worker thread; at most one per buffer
class AI_chat_job(threading.Thread):
def __init__(self, context, messages, provider):
threading.Thread.__init__(self)
self.lines = []
self.buffer = ""
self.previous_type = ""
self.messages = messages
self.context = context
self.cancelled = False
self.provider = provider
self.done = False
self.lock = threading.RLock()

def run(self):
print_debug_threaded("AI_chat_job thread STARTED")
try:
for chunk in self.provider.request(self.messages):
with self.lock:
# For now, we only append whole lines to the buffer
print_debug_threaded(f"Received chunk: '{chunk['type']}' => '{chunk['content']}'")
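# a change of chunk type (e.g. thinking -> assistant) or an explicit
# "newsegment" flag opens a new "<<< type" section in the buffer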
if self.previous_type != chunk["type"] or "newsegment" in chunk:
if self.previous_type != "":
self.buffer += "\n"
self.buffer += "\n<<< " + chunk["type"] + "\n\n"
self.previous_type = chunk["type"]
self.buffer += chunk["content"]
if self.cancelled:
self.buffer += "\n\nCANCELLED by user"
print_debug_threaded("AI_chat_job cancelled during provider request")
if "\n" in self.buffer:
parts = self.buffer.split("\n")
self.lines.extend(parts[:-1])
self.buffer = parts[-1]
if self.cancelled:
break # Exit the loop
except Exception as e:
with self.lock:
self.lines.append("")
self.lines.append(f"<<< error getting response: {str(e)}")
self.lines.append("")
self.lines.append("```python")
self.lines.extend(traceback.format_exc().split("\n"))
self.lines.append("```")
try:
self.lines.append("")
self.lines.append(json.loads(e.read().decode())["error"]["message"])
except Exception:
pass
finally:
with self.lock:
self.lines.append(self.buffer)
if self.previous_type == "assistant":
self.lines.extend("\n>>> user\n\n".split("\n"))
self.done = True
print_debug_threaded("AI_chat_job thread DONE")

def pickup(self):
with self.lock:
lines = copy.deepcopy(self.lines)
self.lines = []
return lines

def is_done(self):
with self.lock:
done = self.done
return done

def cancel(self):
with self.lock:
self.cancelled = True

# Pool of AI chat jobs accessible by bufnr
# There can be only one in progress per bufnr
class AI_chat_jobs_pool(object):
def __init__(self):
self.pool = {}

def new_job(self, context, messages, provider):
bufnr = context["bufnr"]
update_debug_variables()
self.pool[bufnr] = AI_chat_job(context, messages, provider)
self.pool[bufnr].start()
return self.pool[bufnr]

# pickup lines from a job based on bufnr
def pickup_lines(self, bufnr):
if bufnr in self.pool:
# pickup() already returns a private copy of the lines
return self.pool[bufnr].pickup()
else:
return []

def is_job_done(self, bufnr):
if bufnr in self.pool:
if self.pool[bufnr].is_done():
return 1
return 0
else:
return 1

def cancel_job(self, bufnr):
print_debug_threaded(f"Attempting to cancel job for bufnr {bufnr}")
if bufnr in self.pool:
job = self.pool[bufnr]
if not job.is_done():
job.cancel()
print_debug_threaded(f"Cancellation signal sent to job for bufnr {bufnr}")
return True
else:
print_debug_threaded(f"Job for bufnr {bufnr} is already done.")
return False
print_debug_threaded(f"No active job found for bufnr {bufnr} to cancel.")
return False

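# module-level pool shared by every chat buffer in this Vim instance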
ai_job_pool = AI_chat_jobs_pool()