Skip to content

Commit 3d1013d

Browse files
committed
refactor: Remove redundant try/except around opentelemetry imports
Use the module-level tracer instead of defensive inline opentelemetry imports in knowledge_agent.py and knowledge_base.py. Also introduce a shared self.mini_llm (openai_model_mini) in SRELangGraphAgent for utility tasks such as structured topic extraction and knowledge-tool calls, and update the test to expect two ChatOpenAI instances.
1 parent d7efba2 commit 3d1013d

File tree

4 files changed

+47
-78
lines changed

4 files changed

+47
-78
lines changed

redis_sre_agent/agent/knowledge_agent.py

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -147,19 +147,10 @@ def _sanitize_messages_for_llm(msgs: list[BaseMessage]) -> list[BaseMessage]:
147147
return clean
148148

149149
# OTel: capture sanitize phase
150-
try:
151-
from opentelemetry import trace as _otel_trace # type: ignore
152-
153-
_tr = _otel_trace.get_tracer(__name__)
154-
except Exception:
155-
_tr = None # type: ignore
156150
_pre_count = len(messages)
157-
if _tr:
158-
with _tr.start_as_current_span(
159-
"knowledge.agent.sanitize", attributes={"messages.pre": _pre_count}
160-
):
161-
messages = _sanitize_messages_for_llm(messages)
162-
else:
151+
with tracer.start_as_current_span(
152+
"knowledge.agent.sanitize", attributes={"messages.pre": _pre_count}
153+
):
163154
messages = _sanitize_messages_for_llm(messages)
164155

165156
try:

redis_sre_agent/agent/langgraph_agent.py

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -320,12 +320,18 @@ def __init__(self, progress_callback=None):
320320
"""Initialize the SRE LangGraph agent."""
321321
self.settings = settings
322322
self.progress_callback = progress_callback
323-
# Single LLM with both reasoning and function calling capabilities
323+
# LLM with both reasoning and function calling capabilities
324324
self.llm = ChatOpenAI(
325325
model=self.settings.openai_model,
326326
openai_api_key=self.settings.openai_api_key,
327327
timeout=self.settings.llm_timeout,
328328
)
329+
# Faster LLM for utility tasks
330+
self.mini_llm = ChatOpenAI(
331+
model=self.settings.openai_model_mini,
332+
openai_api_key=self.settings.openai_api_key,
333+
timeout=self.settings.llm_timeout,
334+
)
329335

330336
# Tools will be loaded per-query using ToolManager
331337
# No tools bound at initialization - they're bound per conversation
@@ -920,7 +926,9 @@ def _parse_tool_json_blocks(tool_msg_text: str) -> Optional[dict]:
920926
try:
921927
from .models import TopicsList
922928

923-
extractor_llm = self.llm.with_structured_output(TopicsList) # return TopicsList
929+
extractor_llm = self.mini_llm.with_structured_output(
930+
TopicsList
931+
) # return TopicsList
924932
instance_ctx = {
925933
"instance_type": target_instance.instance_type,
926934
"name": target_instance.name,
@@ -990,12 +998,7 @@ def _sev_score(t: dict) -> int:
990998
knowledge_tools = tool_mgr.get_tools_for_capability(_ToolCap.KNOWLEDGE)
991999
knowledge_adapters = await _build_adapters(tool_mgr, knowledge_tools)
9921000
if knowledge_adapters:
993-
knowledge_llm_base = ChatOpenAI(
994-
model=self.settings.openai_model_mini,
995-
openai_api_key=self.settings.openai_api_key,
996-
timeout=self.settings.llm_timeout,
997-
)
998-
knowledge_llm = knowledge_llm_base.bind_tools(knowledge_adapters)
1001+
knowledge_llm = self.mini_llm.bind_tools(knowledge_adapters)
9991002

10001003
if knowledge_adapters:
10011004
logger.info(

redis_sre_agent/tools/knowledge/knowledge_base.py

Lines changed: 24 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -272,24 +272,16 @@ async def ingest(
272272
kwargs["product_labels"] = product_labels
273273

274274
# OTel: instrument ingestion
275-
try:
276-
from opentelemetry import trace as _otel_trace # type: ignore
277-
278-
_tr = _otel_trace.get_tracer(__name__)
279-
except Exception:
280-
_tr = None # type: ignore
281-
if _tr:
282-
with _tr.start_as_current_span(
283-
"tool.knowledge.ingest",
284-
attributes={
285-
"title.len": len(title or ""),
286-
"category": str(category or ""),
287-
"has.labels": bool(product_labels),
288-
"has.severity": bool(severity),
289-
},
290-
):
291-
return await _ingest_sre_document(**kwargs)
292-
return await _ingest_sre_document(**kwargs)
275+
with tracer.start_as_current_span(
276+
"tool.knowledge.ingest",
277+
attributes={
278+
"title.len": len(title or ""),
279+
"category": str(category or ""),
280+
"has.labels": bool(product_labels),
281+
"has.severity": bool(severity),
282+
},
283+
):
284+
return await _ingest_sre_document(**kwargs)
293285

294286
async def get_all_fragments(self, document_hash: str) -> Dict[str, Any]:
295287
"""Get all fragments of a document.
@@ -301,19 +293,11 @@ async def get_all_fragments(self, document_hash: str) -> Dict[str, Any]:
301293
Dictionary with all document fragments
302294
"""
303295
logger.info(f"Getting all fragments for document: {document_hash}")
304-
try:
305-
from opentelemetry import trace as _otel_trace # type: ignore
306-
307-
_tr = _otel_trace.get_tracer(__name__)
308-
except Exception:
309-
_tr = None # type: ignore
310-
if _tr:
311-
with _tr.start_as_current_span(
312-
"tool.knowledge.get_all_fragments",
313-
attributes={"document_hash": str(document_hash)[:16]},
314-
):
315-
return await get_all_document_fragments(document_hash)
316-
return await get_all_document_fragments(document_hash)
296+
with tracer.start_as_current_span(
297+
"tool.knowledge.get_all_fragments",
298+
attributes={"document_hash": str(document_hash)[:16]},
299+
):
300+
return await get_all_document_fragments(document_hash)
317301

318302
async def get_related_fragments(
319303
self, document_hash: str, chunk_index: int, limit: int = 10
@@ -329,20 +313,12 @@ async def get_related_fragments(
329313
Dictionary with related fragments
330314
"""
331315
logger.info(f"Getting related fragments for document {document_hash}, chunk {chunk_index}")
332-
try:
333-
from opentelemetry import trace as _otel_trace # type: ignore
334-
335-
_tr = _otel_trace.get_tracer(__name__)
336-
except Exception:
337-
_tr = None # type: ignore
338-
if _tr:
339-
with _tr.start_as_current_span(
340-
"tool.knowledge.get_related_fragments",
341-
attributes={
342-
"document_hash": str(document_hash)[:16],
343-
"chunk_index": int(chunk_index),
344-
"limit": int(limit),
345-
},
346-
):
347-
return await get_related_document_fragments(document_hash, chunk_index, limit)
348-
return await get_related_document_fragments(document_hash, chunk_index, limit)
316+
with tracer.start_as_current_span(
317+
"tool.knowledge.get_related_fragments",
318+
attributes={
319+
"document_hash": str(document_hash)[:16],
320+
"chunk_index": int(chunk_index),
321+
"limit": int(limit),
322+
},
323+
):
324+
return await get_related_document_fragments(document_hash, chunk_index, limit)

tests/unit/agent/test_temperature_removal.py

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -43,16 +43,15 @@ def test_langgraph_agent_no_temperature(self, mock_chat_openai):
4343

4444
SRELangGraphAgent()
4545

46-
# Verify ChatOpenAI was called without temperature
47-
mock_chat_openai.assert_called_once()
48-
call_args = mock_chat_openai.call_args
49-
50-
# Check that temperature is not in the arguments
51-
assert "temperature" not in call_args.kwargs
52-
53-
# Verify required arguments are present
54-
assert "model" in call_args.kwargs
55-
assert "openai_api_key" in call_args.kwargs
46+
# Verify ChatOpenAI was called twice (self.llm and self.mini_llm)
47+
assert mock_chat_openai.call_count == 2
48+
49+
# Check all calls to ensure none use temperature
50+
for call_args in mock_chat_openai.call_args_list:
51+
assert "temperature" not in call_args.kwargs
52+
# Verify required arguments are present
53+
assert "model" in call_args.kwargs
54+
assert "openai_api_key" in call_args.kwargs
5655

5756
@patch("redis_sre_agent.evaluation.judge.ChatOpenAI")
5857
def test_judge_no_temperature(self, mock_chat_openai):

0 commit comments

Comments
 (0)