```diff
@@ -11,7 +11,6 @@
 
 - Fixed error response handling in stream and non-stream modes
 
-
 ## 1.0.0b6 (2025-11-26)
 
 ### Features Added
```
```diff
@@ -167,20 +167,23 @@ async def _resolve_agent_for_request(self, context: AgentRunContext):
         return agent, tool_client_wrapper
 
     def init_tracing(self):
-        exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT)
-        app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME)
-        project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT)
-
-        if exporter or app_insights_conn_str:
-            from agent_framework.observability import setup_observability
-
-            setup_observability(
-                enable_sensitive_data=True,
-                otlp_endpoint=exporter,
-                applicationinsights_connection_string=app_insights_conn_str,
-            )
-        elif project_endpoint:
-            self.setup_tracing_with_azure_ai_client(project_endpoint)
+        try:
+            exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT)
+            app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME)
+            project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT)
+
+            if exporter or app_insights_conn_str:
+                from agent_framework.observability import setup_observability
+
+                setup_observability(
+                    enable_sensitive_data=True,
+                    otlp_endpoint=exporter,
+                    applicationinsights_connection_string=app_insights_conn_str,
+                )
+            elif project_endpoint:
+                self.setup_tracing_with_azure_ai_client(project_endpoint)
+        except Exception as e:
+            logger.warning(f"Failed to initialize tracing: {e}", exc_info=True)
+        self.tracer = trace.get_tracer(__name__)
 
     def setup_tracing_with_azure_ai_client(self, project_endpoint: str):
```
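The refactored `init_tracing` wraps backend resolution in a try/except, so a telemetry misconfiguration can no longer prevent the server from starting, and `self.tracer` is now assigned unconditionally. A minimal sketch of that resolution order, assuming placeholder env-var names (the real names come from `AdapterConstants` and `APPINSIGHT_CONNSTR_ENV_NAME`):

```python
import logging
import os

logger = logging.getLogger(__name__)

def resolve_tracing_backend() -> str:
    """Sketch of the fallback order: OTLP exporter / App Insights first,
    then the Azure AI project endpoint; any failure degrades to 'none'
    instead of crashing startup."""
    try:
        # Placeholder names; the adapter reads these via AdapterConstants.
        otlp = os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT")
        conn_str = os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING")
        project = os.environ.get("AZURE_AI_PROJECT_ENDPOINT")
        if otlp or conn_str:
            return "otlp-or-app-insights"
        if project:
            return "azure-ai-project"
        return "none"
    except Exception as e:
        logger.warning("Failed to initialize tracing: %s", e, exc_info=True)
        return "none"
```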
```diff
@@ -20,7 +20,7 @@ classifiers = [
 keywords = ["azure", "azure sdk"]
 
 dependencies = [
-    "azure-ai-agentserver-core>=1.0.0b5",
+    "azure-ai-agentserver-core>=1.0.0b7",
     "agent-framework-azure-ai>=1.0.0b251112",
     "agent-framework-core>=1.0.0b251112",
    "opentelemetry-exporter-otlp-proto-grpc>=1.36.0",
@@ -65,5 +65,9 @@ breaking = false # incompatible python version
 pyright = false
 verifytypes = false # incompatible python version for -core
 verify_keywords = false
-mindependency = false # depends on -core package
+#mindependency = false # depends on -core package
+#latestdependency = false
+#whl = false
+#depends = false
+#pylint = false
 whl_no_aio = false
```
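Both adapter packages now require `azure-ai-agentserver-core>=1.0.0b7`, presumably so they pick up the error-handling fix noted in the core changelog. For reference, a quick check of what this pre-release floor admits, using the `packaging` library:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1.0.0b7")
for candidate in ["1.0.0b5", "1.0.0b6", "1.0.0b7", "1.0.0"]:
    # prereleases=True is needed because the b-versions are pre-releases
    print(candidate, spec.contains(candidate, prereleases=True))
# 1.0.0b5 False / 1.0.0b6 False / 1.0.0b7 True / 1.0.0 True
```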
sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md (1 change: 0 additions & 1 deletion)

```diff
@@ -11,7 +11,6 @@
 
 - Fixed error response handling in stream and non-stream modes
 
-
 ## 1.0.0b6 (2025-11-26)
 
 ### Features Added
```
```diff
@@ -14,6 +14,7 @@
 from opentelemetry import context as otel_context, trace
 from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
 from starlette.applications import Starlette
+from starlette.concurrency import iterate_in_threadpool
 from starlette.middleware.base import BaseHTTPMiddleware
 from starlette.middleware.cors import CORSMiddleware
 from starlette.requests import Request
@@ -137,61 +138,46 @@ async def runs_endpoint(request):
             context_carrier = {}
             TraceContextTextMapPropagator().inject(context_carrier)
 
+            ex = None
             resp = await self.agent_run(context)
 
-            if inspect.isgenerator(resp):
-                def gen():
-                    ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier)
-                    token = otel_context.attach(ctx)
-                    seq = 0
-                    try:
-                        for event in resp:
-                            seq += 1
-                            yield _event_to_sse_chunk(event)
-                    except Exception as e:  # noqa: BLE001
-                        logger.error("Error in non-async generator: %s", e, exc_info=True)
-                        err = project_models.ResponseErrorEvent(
-                            sequence_number=seq + 1,
-                            code=project_models.ResponseErrorCode.SERVER_ERROR,
-                            message=_format_error(e),
-                            param="")
-                        yield _event_to_sse_chunk(err)
-                    finally:
-                        logger.info("End of processing CreateResponse request.")
-                        otel_context.detach(token)
-
-                return StreamingResponse(gen(), media_type="text/event-stream")
-            if inspect.isasyncgen(resp):
-                async def gen_async():
-                    ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier)
-                    token = otel_context.attach(ctx)
-                    seq = 0
-                    try:
-                        async for event in resp:
-                            seq += 1
-                            yield _event_to_sse_chunk(event)
-                    except Exception as e:  # noqa: BLE001
-                        logger.error("Error in async generator: %s", e, exc_info=True)
-                        err = project_models.ResponseErrorEvent(
-                            sequence_number=seq + 1,
-                            code=project_models.ResponseErrorCode.SERVER_ERROR,
-                            message=_format_error(e),
-                            param="")
-                        yield _event_to_sse_chunk(err)
-                    finally:
-                        logger.info("End of processing CreateResponse request.")
-                        otel_context.detach(token)
-
-                return StreamingResponse(gen_async(), media_type="text/event-stream")
-            logger.info("End of processing CreateResponse request.")
-            return JSONResponse(resp.as_dict())
         except Exception as e:
             # TODO: extract status code from exception
             logger.error(f"Error processing CreateResponse request: {e}", exc_info=True)
-            err = project_models.ResponseError(
+            ex = e
+
+        if not context.stream:
+            logger.info("End of processing CreateResponse request.")
+            result = resp if not ex else project_models.ResponseError(
                 code=project_models.ResponseErrorCode.SERVER_ERROR,
-                message=_format_error(e))
-            return JSONResponse(err.as_dict())
+                message=_format_error(ex))
+            return JSONResponse(result.as_dict())
+
+        async def gen_async(ex):
+            ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier)
+            token = otel_context.attach(ctx)
+            seq = 0
+            try:
+                if ex:
+                    return
+                it = iterate_in_threadpool(resp) if inspect.isgenerator(resp) else resp
+                async for event in it:
+                    seq += 1
+                    yield _event_to_sse_chunk(event)
+                logger.info("End of processing CreateResponse request.")
+            except Exception as e:  # noqa: BLE001
+                logger.error("Error in async generator: %s", e, exc_info=True)
+                ex = e
+            finally:
+                if ex:
+                    err = project_models.ResponseErrorEvent(
+                        sequence_number=seq + 1,
+                        code=project_models.ResponseErrorCode.SERVER_ERROR,
+                        message=_format_error(ex),
+                        param="")
+                    yield _event_to_sse_chunk(err)
+                otel_context.detach(token)
+
+        return StreamingResponse(gen_async(ex), media_type="text/event-stream")
 
     async def liveness_endpoint(request):
         result = await self.agent_liveness(request)
```
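The refactor collapses the two previous streaming branches (a sync `gen()` and an async `gen_async()`) into a single async generator: `starlette.concurrency.iterate_in_threadpool` adapts a blocking generator into an async iterator, so one code path emits SSE chunks and the trailing error event for both cases. It also means an exception raised by `agent_run` itself is delivered to the stream as a `ResponseErrorEvent` (via the `if ex: return` / `finally` path) rather than as a bare server error. A self-contained sketch of that dispatch; the stand-in generators are hypothetical:

```python
import asyncio
import inspect

from starlette.concurrency import iterate_in_threadpool

def sync_events():
    # Stand-in for a blocking generator returned by agent_run.
    yield "hello"
    yield "world"

async def async_events():
    # Stand-in for a native async generator returned by agent_run.
    yield "hello"
    yield "world"

async def consume(resp):
    # Same dispatch as gen_async: run sync generators in a thread pool,
    # pass async generators through unchanged.
    it = iterate_in_threadpool(resp) if inspect.isgenerator(resp) else resp
    async for event in it:
        print(event)

asyncio.run(consume(sync_events()))   # blocking generator, no event-loop stalls
asyncio.run(consume(async_events()))  # native async generator, used as-is
```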
```diff
@@ -11,7 +11,6 @@
 
 - Fixed error response handling in stream and non-stream modes
 
-
 ## 1.0.0b6 (2025-11-26)
 
 ### Features Added
```
```diff
@@ -268,7 +268,7 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext,
             output = self.state_converter.state_to_response(result, context)
             return output
         except Exception as e:
-            logger.error(f"Error during agent run: {e}")
+            logger.error(f"Error during agent run: {e}", exc_info=True)
             raise e
 
     async def agent_run_astream(
@@ -301,7 +301,7 @@ async def agent_run_astream(
             async for result in self.state_converter.state_to_response_stream(stream, context):
                 yield result
         except Exception as e:
-            logger.error(f"Error during streaming agent run: {e}")
+            logger.error(f"Error during streaming agent run: {e}", exc_info=True)
             raise e
         finally:
             # Close tool_client if provided
```
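The only change in these two handlers is `exc_info=True`, which makes the logger record the full traceback rather than just the exception message. A tiny illustration (not SDK code):

```python
import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger("agent")

try:
    raise ValueError("boom")
except Exception as e:
    logger.error(f"Error during agent run: {e}")                 # message only
    logger.error(f"Error during agent run: {e}", exc_info=True)  # message + traceback
```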
```diff
@@ -19,7 +19,7 @@ classifiers = [
 keywords = ["azure", "azure sdk"]
 
 dependencies = [
-    "azure-ai-agentserver-core>=1.0.0b5",
+    "azure-ai-agentserver-core>=1.0.0b7",
     "langchain>0.3.20",
     "langchain-openai>0.3.10",
     "langchain-azure-ai[opentelemetry]>=0.1.8",
@@ -64,5 +64,9 @@ breaking = false # incompatible python version
 pyright = false
 verifytypes = false # incompatible python version for -core
 verify_keywords = false
-mindependency = false # depends on -core package
+#mindependency = false # depends on -core package
+#latestdependency = false
+#whl = false
+#depends = false
+#pylint = false
 whl_no_aio = false
```