Skip to content

Commit d7efba2

Browse files
committed
refactor: Remove redundant try/except around opentelemetry imports
Use module-level tracer instead of defensive inline imports
1 parent 7707e87 commit d7efba2

File tree

2 files changed

+5
-31
lines changed

2 files changed

+5
-31
lines changed

redis_sre_agent/agent/knowledge_agent.py

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -165,20 +165,12 @@ def _sanitize_messages_for_llm(msgs: list[BaseMessage]) -> list[BaseMessage]:
165165
try:
166166
import time as _time
167167

168-
try:
169-
from opentelemetry import trace as _otel_trace # type: ignore
170-
except Exception:
171-
_otel_trace = None # type: ignore
172168
from redis_sre_agent.observability.llm_metrics import record_llm_call_metrics
173169

174170
_t0 = _time.perf_counter()
175-
_tr = _otel_trace.get_tracer(__name__) if _otel_trace else None
176-
if _tr:
177-
with _tr.start_as_current_span(
178-
"llm.call", attributes={"llm.component": "knowledge"}
179-
):
180-
response = await llm_with_tools.ainvoke(messages)
181-
else:
171+
with tracer.start_as_current_span(
172+
"llm.call", attributes={"llm.component": "knowledge"}
173+
):
182174
response = await llm_with_tools.ainvoke(messages)
183175
record_llm_call_metrics(
184176
component="knowledge", llm=llm_with_tools, response=response, start_time=_t0

redis_sre_agent/agent/runbook_generator.py

Lines changed: 2 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -235,18 +235,10 @@ async def _generate_research_summary(
235235

236236
import time as _time
237237

238-
try:
239-
from opentelemetry import trace as _otel_trace # type: ignore
240-
except Exception:
241-
_otel_trace = None # type: ignore
242238
from redis_sre_agent.observability.llm_metrics import record_llm_call_metrics
243239

244240
_t0 = _time.perf_counter()
245-
_tr = _otel_trace.get_tracer(__name__) if _otel_trace else None
246-
if _tr:
247-
with _tr.start_as_current_span("llm.call", attributes={"llm.component": "runbook"}):
248-
response = await self.llm.ainvoke([HumanMessage(content=prompt)])
249-
else:
241+
with tracer.start_as_current_span("llm.call", attributes={"llm.component": "runbook"}):
250242
response = await self.llm.ainvoke([HumanMessage(content=prompt)])
251243
record_llm_call_metrics(
252244
component="runbook", llm=self.llm, response=response, start_time=_t0
@@ -480,20 +472,10 @@ async def _evaluate_runbook(self, runbook: GeneratedRunbook) -> RunbookEvaluatio
480472

481473
import time as _time
482474

483-
try:
484-
from opentelemetry import trace as _otel_trace # type: ignore
485-
except Exception:
486-
_otel_trace = None # type: ignore
487475
from redis_sre_agent.observability.llm_metrics import record_llm_call_metrics
488476

489477
_t0 = _time.perf_counter()
490-
_tr = _otel_trace.get_tracer(__name__) if _otel_trace else None
491-
if _tr:
492-
with _tr.start_as_current_span("llm.call", attributes={"llm.component": "runbook"}):
493-
response = await self.llm.ainvoke(
494-
[SystemMessage(content=system_prompt), HumanMessage(content=user_prompt)]
495-
)
496-
else:
478+
with tracer.start_as_current_span("llm.call", attributes={"llm.component": "runbook"}):
497479
response = await self.llm.ainvoke(
498480
[SystemMessage(content=system_prompt), HumanMessage(content=user_prompt)]
499481
)

0 commit comments

Comments (0)