 from langchain_openai import ChatOpenAI
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.graph import END, StateGraph
+from langgraph.prebuilt import ToolNode as LGToolNode
 from opentelemetry import trace
 from pydantic import BaseModel, Field
 
     save_instances,
 )
 from ..tools.manager import ToolManager
+from .helpers import build_adapters_for_tooldefs as _build_adapters
 from .helpers import log_preflight_messages
 from .prompts import SRE_SYSTEM_PROMPT
+from .subgraphs.safety_fact_corrector import build_safety_fact_corrector
 
 logger = logging.getLogger(__name__)
 tracer = trace.get_tracer(__name__)
@@ -284,12 +287,6 @@ def _extract_instance_details_from_message(message: str) -> Optional[Dict[str, s
     return None
 
 
-# SRE-focused system prompt
-# Prompts moved to redis_sre_agent/agent/prompts.py
-
-# Fact-checker prompt moved to redis_sre_agent/agent/prompts.py
-
-
 class AgentState(TypedDict):
     """State schema for the SRE LangGraph agent."""
 
@@ -337,9 +334,9 @@ def __init__(self, progress_callback=None):
         # No tools bound at initialization - they're bound per conversation
         self.llm_with_tools = self.llm  # Will be rebound with tools per query
 
-        # Workflow will be built per-query with the appropriate ToolManager
-        # Note: We create a new MemorySaver for each query to ensure proper isolation
-        # This prevents cross-contamination between different tasks/threads
+        # Workflow will be built per-query with the appropriate ToolManager.
+        # Note: We create a new MemorySaver for each query to ensure proper isolation.
+        # This prevents cross-contamination between different tasks/threads.
 
         logger.info("SRE LangGraph agent initialized (tools loaded per-query)")
 
@@ -799,10 +796,6 @@ async def tool_node(state: AgentState) -> AgentState:
 
             # 2) Build StructuredTool adapters once (resolve via ToolManager)
             try:
-                from langgraph.prebuilt import ToolNode as LGToolNode
-
-                from .helpers import build_adapters_for_tooldefs as _build_adapters
-
                 # Centralized adapter builder (returns StructuredTool adapters)
                 adapters = await _build_adapters(tool_mgr, list(tooldefs_by_name.values()))
 
@@ -992,8 +985,6 @@ def _sev_score(t: dict) -> int:
             # Build knowledge-only adapters locally (mini model)
             from redis_sre_agent.tools.models import ToolCapability as _ToolCap
 
-            from .helpers import build_adapters_for_tooldefs as _build_adapters
-
             # Use all knowledge tools for the mini knowledge agent; no op-level filtering.
             knowledge_tools = tool_mgr.get_tools_for_capability(_ToolCap.KNOWLEDGE)
             knowledge_adapters = await _build_adapters(tool_mgr, knowledge_tools)
@@ -1559,8 +1550,6 @@ def get(self, key: str, default: Any = None):
         for tool in tools:
             logger.debug(f" - {tool.name}")
 
-        from .helpers import build_adapters_for_tooldefs as _build_adapters
-
         adapters = await _build_adapters(tool_mgr, tools)
 
         # Rebind LLM with tools for this query
@@ -1702,10 +1691,10 @@ async def process_query(
             conversation_history,
         )
 
-        # Skip correction if out of Redis scope
+        # Skip correction if this message isn't about Redis
         try:
             if not (self._is_redis_scoped(query) or self._is_redis_scoped(response)):
-                logger.info("Skipping safety/fact-corrector (out of Redis scope)")
+                logger.info("Skipping safety/fact-corrector (topic may not be Redis)")
                 return response
         except Exception:
             pass
@@ -1715,10 +1704,6 @@ async def process_query(
             return response
 
         # Build a small, bounded corrector with knowledge + utilities tools only
-        from langchain_openai import ChatOpenAI
-
-        from .subgraphs.safety_fact_corrector import build_safety_fact_corrector
-
         # Use always-on providers (knowledge, utilities)
         async with ToolManager(redis_instance=None) as corrector_tool_manager:
             # Select knowledge and utility tools via capabilities.
@@ -1730,17 +1715,10 @@ async def process_query(
             tooldefs = list(knowledge_defs) + list(utilities_defs)
 
             # Build StructuredTool adapters centrally
-            from .helpers import build_adapters_for_tooldefs as _build_adapters
-
             adapters = await _build_adapters(corrector_tool_manager, tooldefs)
 
             # LLM with tools bound via adapters
-            corrector_llm_base = ChatOpenAI(
-                model=self.settings.openai_model_mini,
-                openai_api_key=self.settings.openai_api_key,
-                timeout=self.settings.llm_timeout,
-            )
-            corrector_llm = corrector_llm_base.bind_tools(adapters)
+            corrector_llm = self.mini_llm.bind_tools(adapters)
 
             # Build the compiled subgraph
             corrector = build_safety_fact_corrector(
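Not part of the diff above: a minimal sketch of the per-query isolation pattern the updated comments describe, compiling a workflow with a fresh MemorySaver for each query so checkpoints from one task/thread never bleed into another. DemoState, answer_node, and run_query are illustrative names, not code from this repository; the sketch assumes only that langgraph is installed.

# Hypothetical sketch (not from this PR): one MemorySaver per query keeps
# checkpoint state isolated between tasks/threads.
from typing import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, StateGraph


class DemoState(TypedDict):
    query: str
    answer: str


def answer_node(state: DemoState) -> DemoState:
    # Stand-in for the agent/tool nodes that are built per query.
    return {"query": state["query"], "answer": f"handled: {state['query']}"}


def run_query(query: str, thread_id: str) -> str:
    workflow = StateGraph(DemoState)
    workflow.add_node("answer", answer_node)
    workflow.set_entry_point("answer")
    workflow.add_edge("answer", END)
    # Fresh checkpointer per invocation: no cross-contamination between threads.
    app = workflow.compile(checkpointer=MemorySaver())
    result = app.invoke(
        {"query": query, "answer": ""},
        config={"configurable": {"thread_id": thread_id}},
    )
    return result["answer"]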