diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md
index ec53a2a60c03..84c4a76a27e5 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md
@@ -11,7 +11,6 @@
 
 - Fixed error response handling in stream and non-stream modes
 
-
 ## 1.0.0b6 (2025-11-26)
 
 ### Features Added
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py
index 4a0a074bd635..233436ac84ea 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py
@@ -167,20 +167,23 @@ async def _resolve_agent_for_request(self, context: AgentRunContext):
         return agent, tool_client_wrapper
 
     def init_tracing(self):
-        exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT)
-        app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME)
-        project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT)
-
-        if exporter or app_insights_conn_str:
-            from agent_framework.observability import setup_observability
-
-            setup_observability(
-                enable_sensitive_data=True,
-                otlp_endpoint=exporter,
-                applicationinsights_connection_string=app_insights_conn_str,
-            )
-        elif project_endpoint:
-            self.setup_tracing_with_azure_ai_client(project_endpoint)
+        try:
+            exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT)
+            app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME)
+            project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT)
+
+            if exporter or app_insights_conn_str:
+                from agent_framework.observability import setup_observability
+
+                setup_observability(
+                    enable_sensitive_data=True,
+                    otlp_endpoint=exporter,
+                    applicationinsights_connection_string=app_insights_conn_str,
+                )
+            elif project_endpoint:
+                self.setup_tracing_with_azure_ai_client(project_endpoint)
+        except Exception as e:
+            logger.warning(f"Failed to initialize tracing: {e}", exc_info=True)
         self.tracer = trace.get_tracer(__name__)
 
     def setup_tracing_with_azure_ai_client(self, project_endpoint: str):
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml
index 19840e57fadb..a86c9eef2648 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml
@@ -20,7 +20,7 @@ classifiers = [
 keywords = ["azure", "azure sdk"]
 
 dependencies = [
-    "azure-ai-agentserver-core>=1.0.0b5",
+    "azure-ai-agentserver-core>=1.0.0b7",
     "agent-framework-azure-ai>=1.0.0b251112",
     "agent-framework-core>=1.0.0b251112",
     "opentelemetry-exporter-otlp-proto-grpc>=1.36.0",
@@ -65,5 +65,9 @@ breaking = false # incompatible python version
 pyright = false
 verifytypes = false # incompatible python version for -core
 verify_keywords = false
-mindependency = false # depends on -core package
+#mindependency = false # depends on -core package
+#latestdependency = false
+#whl = false
+#depends = false
+#pylint = false
 whl_no_aio = false
diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md
index ec53a2a60c03..84c4a76a27e5 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md
+++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md
@@ -11,7 +11,6 @@
 
 - Fixed error response handling in stream and non-stream modes
 
-
 ## 1.0.0b6 (2025-11-26)
 
 ### Features Added
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py
index 5d25dea61be6..bc749a1fd782 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py
@@ -14,6 +14,7 @@
 from opentelemetry import context as otel_context, trace
 from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
 from starlette.applications import Starlette
+from starlette.concurrency import iterate_in_threadpool
 from starlette.middleware.base import BaseHTTPMiddleware
 from starlette.middleware.cors import CORSMiddleware
 from starlette.requests import Request
@@ -137,61 +138,46 @@ async def runs_endpoint(request):
 
                 context_carrier = {}
                 TraceContextTextMapPropagator().inject(context_carrier)
+                ex = None
                 resp = await self.agent_run(context)
-
-                if inspect.isgenerator(resp):
-                    def gen():
-                        ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier)
-                        token = otel_context.attach(ctx)
-                        seq = 0
-                        try:
-                            for event in resp:
-                                seq += 1
-                                yield _event_to_sse_chunk(event)
-                        except Exception as e:  # noqa: BLE001
-                            logger.error("Error in non-async generator: %s", e, exc_info=True)
-                            err = project_models.ResponseErrorEvent(
-                                sequence_number=seq + 1,
-                                code=project_models.ResponseErrorCode.SERVER_ERROR,
-                                message=_format_error(e),
-                                param="")
-                            yield _event_to_sse_chunk(err)
-                        finally:
-                            logger.info("End of processing CreateResponse request.")
-                            otel_context.detach(token)
-
-                    return StreamingResponse(gen(), media_type="text/event-stream")
-                if inspect.isasyncgen(resp):
-                    async def gen_async():
-                        ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier)
-                        token = otel_context.attach(ctx)
-                        seq = 0
-                        try:
-                            async for event in resp:
-                                seq += 1
-                                yield _event_to_sse_chunk(event)
-                        except Exception as e:  # noqa: BLE001
-                            logger.error("Error in async generator: %s", e, exc_info=True)
-                            err = project_models.ResponseErrorEvent(
-                                sequence_number=seq + 1,
-                                code=project_models.ResponseErrorCode.SERVER_ERROR,
-                                message=_format_error(e),
-                                param="")
-                            yield _event_to_sse_chunk(err)
-                        finally:
-                            logger.info("End of processing CreateResponse request.")
-                            otel_context.detach(token)
-
-                    return StreamingResponse(gen_async(), media_type="text/event-stream")
-                logger.info("End of processing CreateResponse request.")
-                return JSONResponse(resp.as_dict())
             except Exception as e:
                 # TODO: extract status code from exception
                 logger.error(f"Error processing CreateResponse request: {e}", exc_info=True)
-                err = project_models.ResponseError(
+                ex = e
+
+            if not context.stream:
+                logger.info("End of processing CreateResponse request.")
+                result = resp if not ex else project_models.ResponseError(
                     code=project_models.ResponseErrorCode.SERVER_ERROR,
-                    message=_format_error(e))
-                return JSONResponse(err.as_dict())
+                    message=_format_error(ex))
+                return JSONResponse(result.as_dict())
+
+            async def gen_async(ex):
+                ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier)
+                token = otel_context.attach(ctx)
+                seq = 0
+                try:
+                    if ex:
+                        return
+                    it = iterate_in_threadpool(resp) if inspect.isgenerator(resp) else resp
+                    async for event in it:
+                        seq += 1
+                        yield _event_to_sse_chunk(event)
+                    logger.info("End of processing CreateResponse request.")
+                except Exception as e:  # noqa: BLE001
+                    logger.error("Error in async generator: %s", e, exc_info=True)
+                    ex = e
+                finally:
+                    if ex:
+                        err = project_models.ResponseErrorEvent(
+                            sequence_number=seq + 1,
+                            code=project_models.ResponseErrorCode.SERVER_ERROR,
+                            message=_format_error(ex),
+                            param="")
+                        yield _event_to_sse_chunk(err)
+                    otel_context.detach(token)
+
+            return StreamingResponse(gen_async(ex), media_type="text/event-stream")
 
         async def liveness_endpoint(request):
             result = await self.agent_liveness(request)
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md
index 9c1e35949882..abea93ee106a 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md
@@ -11,7 +11,6 @@
 
 - Fixed error response handling in stream and non-stream modes
 
-
 ## 1.0.0b6 (2025-11-26)
 
 ### Feature Added
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py
index e6bf10d0b5c2..51937fe31986 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py
@@ -268,7 +268,7 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext,
                 output = self.state_converter.state_to_response(result, context)
             return output
         except Exception as e:
-            logger.error(f"Error during agent run: {e}")
+            logger.error(f"Error during agent run: {e}", exc_info=True)
             raise e
 
     async def agent_run_astream(
@@ -301,7 +301,7 @@ async def agent_run_astream(
             async for result in self.state_converter.state_to_response_stream(stream, context):
                 yield result
         except Exception as e:
-            logger.error(f"Error during streaming agent run: {e}")
+            logger.error(f"Error during streaming agent run: {e}", exc_info=True)
             raise e
         finally:
             # Close tool_client if provided
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml
index 90edfbceb523..b970062738ee 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml
@@ -19,7 +19,7 @@ classifiers = [
 keywords = ["azure", "azure sdk"]
 
 dependencies = [
-    "azure-ai-agentserver-core>=1.0.0b5",
+    "azure-ai-agentserver-core>=1.0.0b7",
     "langchain>0.3.20",
     "langchain-openai>0.3.10",
     "langchain-azure-ai[opentelemetry]>=0.1.8",
@@ -64,5 +64,9 @@ breaking = false # incompatible python version
 pyright = false
 verifytypes = false # incompatible python version for -core
 verify_keywords = false
-mindependency = false # depends on -core package
+#mindependency = false # depends on -core package
+#latestdependency = false
+#whl = false
+#depends = false
+#pylint = false
 whl_no_aio = false
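
Note on the `base.py` hunk above: the two per-type SSE generators are collapsed into a single `gen_async` that wraps plain (sync) generators with Starlette's `iterate_in_threadpool` and funnels every failure, whether the request failed before streaming or the stream broke midway, into one terminal error chunk yielded from `finally`. Below is a minimal, self-contained sketch of that pattern; `make_events`, `to_sse`, and `stream_with_error_chunk` are hypothetical stand-ins for the adapter's event source, its `_event_to_sse_chunk` helper, and `gen_async`, not the actual SDK API.

```python
import asyncio
import inspect

from starlette.concurrency import iterate_in_threadpool


def make_events():
    """Hypothetical sync event source; agents may return sync or async generators."""
    yield {"type": "response.output_text.delta", "delta": "hello"}
    raise RuntimeError("backend failed mid-stream")


def to_sse(event) -> str:
    """Stand-in for the adapter's SSE chunk formatter."""
    return f"data: {event}\n\n"


async def stream_with_error_chunk(resp, ex=None):
    """One async generator covers sync sources, async sources, and failures."""
    seq = 0
    try:
        if ex:
            return  # request already failed; fall through to the error chunk
        # A plain generator would block the event loop, so iterate it in a thread.
        it = iterate_in_threadpool(resp) if inspect.isgenerator(resp) else resp
        async for event in it:
            seq += 1
            yield to_sse(event)
    except Exception as e:  # noqa: BLE001
        ex = e
    finally:
        if ex:
            # Emit a terminal error event instead of dropping the connection.
            yield to_sse({"type": "error", "sequence_number": seq + 1, "message": str(ex)})


async def main():
    async for chunk in stream_with_error_chunk(make_events()):
        print(chunk, end="")


asyncio.run(main())
```

The sketch prints the delta chunk followed by a final error chunk. Yielding from `finally` is legal in generators (a `return` still runs the `finally` block before `StopAsyncIteration`), which is what lets the pre-stream and mid-stream failure paths share a single error-emission site.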