From d53cf5c63569ef27043c68d365e73ebb74f4d304 Mon Sep 17 00:00:00 2001 From: Chris Gillum Date: Mon, 6 Jan 2025 08:35:13 -0800 Subject: [PATCH 01/20] Update version to 0.2b1, require Python 3.9+, and enhance GitHub Actions workflow (#1) (#35) - Bump version in `pyproject.toml` to 0.2b1 and update Python requirement to >=3.9. - Add `protobuf` dependency in `requirements.txt`. - Update GitHub Actions workflow to support Python versions 3.9 to 3.13 and upgrade action versions. - Refactor type hints in various files to use `Optional` and `list` instead of `Union` and `List`. - Improve handling of custom status in orchestration context and related functions. - Fix purge implementation to pass required parameters. Signed-off-by: Albert Callarisa --- .github/workflows/pr-validation.yml | 19 +++++++-- .vscode/settings.json | 5 ++- durabletask/client.py | 40 +++++++++---------- durabletask/internal/grpc_interceptor.py | 3 +- durabletask/internal/helpers.py | 32 +++++++-------- durabletask/internal/shared.py | 15 ++++--- durabletask/task.py | 30 ++++++++------ durabletask/worker.py | 50 ++++++++++++------------ examples/fanout_fanin.py | 7 ++-- pyproject.toml | 4 +- requirements.txt | 1 + tests/test_activity_executor.py | 4 +- tests/test_orchestration_e2e.py | 2 +- tests/test_orchestration_executor.py | 3 +- 14 files changed, 118 insertions(+), 97 deletions(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 4c09e6b..70ff470 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -16,12 +16,12 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install 
dependencies @@ -35,3 +35,16 @@ jobs: - name: Pytest unit tests run: | pytest -m "not e2e" --verbose + + # Sidecar for running e2e tests requires Go SDK + - name: Install Go SDK + uses: actions/setup-go@v5 + with: + go-version: 'stable' + + # Install and run the durabletask-go sidecar for running e2e tests + - name: Pytest e2e tests + run: | + go install github.com/microsoft/durabletask-go@main + durabletask-go --port 4001 & + pytest -m "e2e" --verbose diff --git a/.vscode/settings.json b/.vscode/settings.json index d737b0b..1c929ac 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,7 +3,7 @@ "editor.defaultFormatter": "ms-python.autopep8", "editor.formatOnSave": true, "editor.codeActionsOnSave": { - "source.organizeImports": true, + "source.organizeImports": "explicit" }, "editor.rulers": [ 119 @@ -29,5 +29,6 @@ "coverage.xml", "jacoco.xml", "coverage.cobertura.xml" - ] + ], + "makefile.configureOnOpen": false } \ No newline at end of file diff --git a/durabletask/client.py b/durabletask/client.py index 82f920a..31953ae 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from datetime import datetime from enum import Enum -from typing import Any, List, Tuple, TypeVar, Union +from typing import Any, Optional, TypeVar, Union import grpc from google.protobuf import wrappers_pb2 @@ -42,10 +42,10 @@ class OrchestrationState: runtime_status: OrchestrationStatus created_at: datetime last_updated_at: datetime - serialized_input: Union[str, None] - serialized_output: Union[str, None] - serialized_custom_status: Union[str, None] - failure_details: Union[task.FailureDetails, None] + serialized_input: Optional[str] + serialized_output: Optional[str] + serialized_custom_status: Optional[str] + failure_details: Optional[task.FailureDetails] def raise_if_failed(self): if self.failure_details is not None: @@ -64,7 +64,7 @@ def failure_details(self): return self._failure_details -def 
new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Union[OrchestrationState, None]: +def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Optional[OrchestrationState]: if not res.exists: return None @@ -92,20 +92,20 @@ def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Un class TaskHubGrpcClient: def __init__(self, *, - host_address: Union[str, None] = None, - metadata: Union[List[Tuple[str, str]], None] = None, - log_handler = None, - log_formatter: Union[logging.Formatter, None] = None, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, secure_channel: bool = False): channel = shared.get_grpc_channel(host_address, metadata, secure_channel=secure_channel) self._stub = stubs.TaskHubSidecarServiceStub(channel) self._logger = shared.get_logger("client", log_handler, log_formatter) def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, - input: Union[TInput, None] = None, - instance_id: Union[str, None] = None, - start_at: Union[datetime, None] = None, - reuse_id_policy: Union[pb.OrchestrationIdReusePolicy, None] = None) -> str: + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + start_at: Optional[datetime] = None, + reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None) -> str: name = orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) @@ -122,14 +122,14 @@ def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInpu res: pb.CreateInstanceResponse = self._stub.StartInstance(req) return res.instanceId - def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> Union[OrchestrationState, None]: + def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> 
Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) res: pb.GetInstanceResponse = self._stub.GetInstance(req) return new_orchestration_state(req.instanceId, res) def wait_for_orchestration_start(self, instance_id: str, *, fetch_payloads: bool = False, - timeout: int = 60) -> Union[OrchestrationState, None]: + timeout: int = 60) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: self._logger.info(f"Waiting up to {timeout}s for instance '{instance_id}' to start.") @@ -144,7 +144,7 @@ def wait_for_orchestration_start(self, instance_id: str, *, def wait_for_orchestration_completion(self, instance_id: str, *, fetch_payloads: bool = True, - timeout: int = 60) -> Union[OrchestrationState, None]: + timeout: int = 60) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: self._logger.info(f"Waiting {timeout}s for instance '{instance_id}' to complete.") @@ -170,7 +170,7 @@ def wait_for_orchestration_completion(self, instance_id: str, *, raise def raise_orchestration_event(self, instance_id: str, event_name: str, *, - data: Union[Any, None] = None): + data: Optional[Any] = None): req = pb.RaiseEventRequest( instanceId=instance_id, name=event_name, @@ -180,7 +180,7 @@ def raise_orchestration_event(self, instance_id: str, event_name: str, *, self._stub.RaiseEvent(req) def terminate_orchestration(self, instance_id: str, *, - output: Union[Any, None] = None, + output: Optional[Any] = None, recursive: bool = True): req = pb.TerminateRequest( instanceId=instance_id, @@ -203,4 +203,4 @@ def resume_orchestration(self, instance_id: str): def purge_orchestration(self, instance_id: str, recursive: bool = True): req = pb.PurgeInstancesRequest(instanceId=instance_id, recursive=recursive) self._logger.info(f"Purging instance '{instance_id}'.") - self._stub.PurgeInstances() + 
self._stub.PurgeInstances(req) diff --git a/durabletask/internal/grpc_interceptor.py b/durabletask/internal/grpc_interceptor.py index 5b12ace..738fca9 100644 --- a/durabletask/internal/grpc_interceptor.py +++ b/durabletask/internal/grpc_interceptor.py @@ -2,7 +2,6 @@ # Licensed under the MIT License. from collections import namedtuple -from typing import List, Tuple import grpc @@ -26,7 +25,7 @@ class DefaultClientInterceptorImpl ( StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an interceptor to add additional headers to all calls as needed.""" - def __init__(self, metadata: List[Tuple[str, str]]): + def __init__(self, metadata: list[tuple[str, str]]): super().__init__() self._metadata = metadata diff --git a/durabletask/internal/helpers.py b/durabletask/internal/helpers.py index c7354e5..6b36586 100644 --- a/durabletask/internal/helpers.py +++ b/durabletask/internal/helpers.py @@ -3,7 +3,7 @@ import traceback from datetime import datetime -from typing import List, Union +from typing import Optional from google.protobuf import timestamp_pb2, wrappers_pb2 @@ -12,14 +12,14 @@ # TODO: The new_xxx_event methods are only used by test code and should be moved elsewhere -def new_orchestrator_started_event(timestamp: Union[datetime, None] = None) -> pb.HistoryEvent: +def new_orchestrator_started_event(timestamp: Optional[datetime] = None) -> pb.HistoryEvent: ts = timestamp_pb2.Timestamp() if timestamp is not None: ts.FromDatetime(timestamp) return pb.HistoryEvent(eventId=-1, timestamp=ts, orchestratorStarted=pb.OrchestratorStartedEvent()) -def new_execution_started_event(name: str, instance_id: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_execution_started_event(name: str, instance_id: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -49,7 +49,7 @@ def new_timer_fired_event(timer_id: int, fire_at: datetime) -> 
pb.HistoryEvent: ) -def new_task_scheduled_event(event_id: int, name: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_task_scheduled_event(event_id: int, name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), @@ -57,7 +57,7 @@ def new_task_scheduled_event(event_id: int, name: str, encoded_input: Union[str, ) -def new_task_completed_event(event_id: int, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_task_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -77,7 +77,7 @@ def new_sub_orchestration_created_event( event_id: int, name: str, instance_id: str, - encoded_input: Union[str, None] = None) -> pb.HistoryEvent: + encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), @@ -88,7 +88,7 @@ def new_sub_orchestration_created_event( ) -def new_sub_orchestration_completed_event(event_id: int, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_sub_orchestration_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -116,7 +116,7 @@ def new_failure_details(ex: Exception) -> pb.TaskFailureDetails: ) -def new_event_raised_event(name: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_event_raised_event(name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -140,7 +140,7 @@ def new_resume_event() -> pb.HistoryEvent: ) -def new_terminated_event(*, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_terminated_event(*, encoded_output: Optional[str] = None) -> pb.HistoryEvent: return 
pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -150,7 +150,7 @@ def new_terminated_event(*, encoded_output: Union[str, None] = None) -> pb.Histo ) -def get_string_value(val: Union[str, None]) -> Union[wrappers_pb2.StringValue, None]: +def get_string_value(val: Optional[str]) -> Optional[wrappers_pb2.StringValue]: if val is None: return None else: @@ -160,9 +160,9 @@ def get_string_value(val: Union[str, None]) -> Union[wrappers_pb2.StringValue, N def new_complete_orchestration_action( id: int, status: pb.OrchestrationStatus, - result: Union[str, None] = None, - failure_details: Union[pb.TaskFailureDetails, None] = None, - carryover_events: Union[List[pb.HistoryEvent], None] = None) -> pb.OrchestratorAction: + result: Optional[str] = None, + failure_details: Optional[pb.TaskFailureDetails] = None, + carryover_events: Optional[list[pb.HistoryEvent]] = None) -> pb.OrchestratorAction: completeOrchestrationAction = pb.CompleteOrchestrationAction( orchestrationStatus=status, result=get_string_value(result), @@ -178,7 +178,7 @@ def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction return pb.OrchestratorAction(id=id, createTimer=pb.CreateTimerAction(fireAt=timestamp)) -def new_schedule_task_action(id: int, name: str, encoded_input: Union[str, None]) -> pb.OrchestratorAction: +def new_schedule_task_action(id: int, name: str, encoded_input: Optional[str]) -> pb.OrchestratorAction: return pb.OrchestratorAction(id=id, scheduleTask=pb.ScheduleTaskAction( name=name, input=get_string_value(encoded_input) @@ -194,8 +194,8 @@ def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp: def new_create_sub_orchestration_action( id: int, name: str, - instance_id: Union[str, None], - encoded_input: Union[str, None]) -> pb.OrchestratorAction: + instance_id: Optional[str], + encoded_input: Optional[str]) -> pb.OrchestratorAction: return pb.OrchestratorAction(id=id, createSubOrchestration=pb.CreateSubOrchestrationAction( name=name, 
instanceId=instance_id, diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 80c3d56..400529a 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -5,7 +5,7 @@ import json import logging from types import SimpleNamespace -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Optional import grpc @@ -20,7 +20,10 @@ def get_default_host_address() -> str: return "localhost:4001" -def get_grpc_channel(host_address: Union[str, None], metadata: Union[List[Tuple[str, str]], None], secure_channel: bool = False) -> grpc.Channel: +def get_grpc_channel( + host_address: Optional[str], + metadata: Optional[list[tuple[str, str]]], + secure_channel: bool = False) -> grpc.Channel: if host_address is None: host_address = get_default_host_address() @@ -36,8 +39,8 @@ def get_grpc_channel(host_address: Union[str, None], metadata: Union[List[Tuple[ def get_logger( name_suffix: str, - log_handler: Union[logging.Handler, None] = None, - log_formatter: Union[logging.Formatter, None] = None) -> logging.Logger: + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None) -> logging.Logger: logger = logging.Logger(f"durabletask-{name_suffix}") # Add a default log handler if none is provided @@ -78,7 +81,7 @@ def default(self, obj): if dataclasses.is_dataclass(obj): # Dataclasses are not serializable by default, so we convert them to a dict and mark them for # automatic deserialization by the receiver - d = dataclasses.asdict(obj) + d = dataclasses.asdict(obj) # type: ignore d[AUTO_SERIALIZED] = True return d elif isinstance(obj, SimpleNamespace): @@ -94,7 +97,7 @@ class InternalJSONDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): super().__init__(object_hook=self.dict_to_object, *args, **kwargs) - def dict_to_object(self, d: Dict[str, Any]): + def dict_to_object(self, d: dict[str, Any]): # If the object was serialized by the InternalJSONEncoder, deserialize 
it as a SimpleNamespace if d.pop(AUTO_SERIALIZED, False): return SimpleNamespace(**d) diff --git a/durabletask/task.py b/durabletask/task.py index a9f85de..a40602b 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -7,8 +7,7 @@ import math from abc import ABC, abstractmethod from datetime import datetime, timedelta -from typing import (Any, Callable, Generator, Generic, List, Optional, TypeVar, - Union) +from typing import Any, Callable, Generator, Generic, Optional, TypeVar, Union import durabletask.internal.helpers as pbh import durabletask.internal.orchestrator_service_pb2 as pb @@ -72,8 +71,13 @@ def is_replaying(self) -> bool: pass @abstractmethod - def set_custom_status(self, custom_status: str) -> None: - """Set the custom status. + def set_custom_status(self, custom_status: Any) -> None: + """Set the orchestration instance's custom status. + + Parameters + ---------- + custom_status: Any + A JSON-serializable custom status value to set. """ pass @@ -254,9 +258,9 @@ def get_exception(self) -> TaskFailedError: class CompositeTask(Task[T]): """A task that is composed of other tasks.""" - _tasks: List[Task] + _tasks: list[Task] - def __init__(self, tasks: List[Task]): + def __init__(self, tasks: list[Task]): super().__init__() self._tasks = tasks self._completed_tasks = 0 @@ -266,17 +270,17 @@ def __init__(self, tasks: List[Task]): if task.is_complete: self.on_child_completed(task) - def get_tasks(self) -> List[Task]: + def get_tasks(self) -> list[Task]: return self._tasks @abstractmethod def on_child_completed(self, task: Task[T]): pass -class WhenAllTask(CompositeTask[List[T]]): +class WhenAllTask(CompositeTask[list[T]]): """A task that completes when all of its child tasks complete.""" - def __init__(self, tasks: List[Task[T]]): + def __init__(self, tasks: list[Task[T]]): super().__init__(tasks) self._completed_tasks = 0 self._failed_tasks = 0 @@ -340,7 +344,7 @@ def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, def 
increment_attempt_count(self) -> None: self._attempt_count += 1 - def compute_next_delay(self) -> Union[timedelta, None]: + def compute_next_delay(self) -> Optional[timedelta]: if self._attempt_count >= self._retry_policy.max_number_of_attempts: return None @@ -375,7 +379,7 @@ def set_retryable_parent(self, retryable_task: RetryableTask): class WhenAnyTask(CompositeTask[Task]): """A task that completes when any of its child tasks complete.""" - def __init__(self, tasks: List[Task]): + def __init__(self, tasks: list[Task]): super().__init__(tasks) def on_child_completed(self, task: Task): @@ -385,12 +389,12 @@ def on_child_completed(self, task: Task): self._result = task -def when_all(tasks: List[Task[T]]) -> WhenAllTask[T]: +def when_all(tasks: list[Task[T]]) -> WhenAllTask[T]: """Returns a task that completes when all of the provided tasks complete or when one of the tasks fail.""" return WhenAllTask(tasks) -def when_any(tasks: List[Task]) -> WhenAnyTask: +def when_any(tasks: list[Task]) -> WhenAnyTask: """Returns a task that completes when any of the provided tasks complete or fail.""" return WhenAnyTask(tasks) diff --git a/durabletask/worker.py b/durabletask/worker.py index bcc1a30..75e2e37 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -6,8 +6,7 @@ from datetime import datetime, timedelta from threading import Event, Thread from types import GeneratorType -from typing import (Any, Dict, Generator, List, Optional, Sequence, Tuple, - TypeVar, Union) +from typing import Any, Generator, Optional, Sequence, TypeVar, Union import grpc from google.protobuf import empty_pb2, wrappers_pb2 @@ -25,8 +24,8 @@ class _Registry: - orchestrators: Dict[str, task.Orchestrator] - activities: Dict[str, task.Activity] + orchestrators: dict[str, task.Orchestrator] + activities: dict[str, task.Activity] def __init__(self): self.orchestrators = {} @@ -86,7 +85,7 @@ class TaskHubGrpcWorker: def __init__(self, *, host_address: Optional[str] = None, - metadata: 
Optional[List[Tuple[str, str]]] = None, + metadata: Optional[list[tuple[str, str]]] = None, log_handler=None, log_formatter: Optional[logging.Formatter] = None, secure_channel: bool = False): @@ -140,7 +139,7 @@ def run_loop(): # The stream blocks until either a work item is received or the stream is canceled # by another thread (see the stop() method). - for work_item in self._response_stream: + for work_item in self._response_stream: # type: ignore request_type = work_item.WhichOneof('request') self._logger.debug(f'Received "{request_type}" work item') if work_item.HasField('orchestratorRequest'): @@ -189,7 +188,10 @@ def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHub try: executor = _OrchestrationExecutor(self._registry, self._logger) result = executor.execute(req.instanceId, req.pastEvents, req.newEvents) - res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=result.actions, customStatus=wrappers_pb2.StringValue(value=result.custom_status)) + res = pb.OrchestratorResponse( + instanceId=req.instanceId, + actions=result.actions, + customStatus=pbh.get_string_value(result.encoded_custom_status)) except Exception as ex: self._logger.exception(f"An error occurred while trying to execute instance '{req.instanceId}': {ex}") failure_details = pbh.new_failure_details(ex) @@ -232,17 +234,17 @@ def __init__(self, instance_id: str): self._is_replaying = True self._is_complete = False self._result = None - self._pending_actions: Dict[int, pb.OrchestratorAction] = {} - self._pending_tasks: Dict[int, task.CompletableTask] = {} + self._pending_actions: dict[int, pb.OrchestratorAction] = {} + self._pending_tasks: dict[int, task.CompletableTask] = {} self._sequence_number = 0 self._current_utc_datetime = datetime(1000, 1, 1) self._instance_id = instance_id self._completion_status: Optional[pb.OrchestrationStatus] = None - self._received_events: Dict[str, List[Any]] = {} - self._pending_events: Dict[str, List[task.CompletableTask]] = {} + 
self._received_events: dict[str, list[Any]] = {} + self._pending_events: dict[str, list[task.CompletableTask]] = {} self._new_input: Optional[Any] = None self._save_events = False - self._custom_status: str = "" + self._encoded_custom_status: Optional[str] = None def run(self, generator: Generator[task.Task, Any, Any]): self._generator = generator @@ -314,10 +316,10 @@ def set_continued_as_new(self, new_input: Any, save_events: bool): self._new_input = new_input self._save_events = save_events - def get_actions(self) -> List[pb.OrchestratorAction]: + def get_actions(self) -> list[pb.OrchestratorAction]: if self._completion_status == pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW: # When continuing-as-new, we only return a single completion action. - carryover_events: Optional[List[pb.HistoryEvent]] = None + carryover_events: Optional[list[pb.HistoryEvent]] = None if self._save_events: carryover_events = [] # We need to save the current set of pending events so that they can be @@ -356,8 +358,8 @@ def is_replaying(self) -> bool: def current_utc_datetime(self, value: datetime): self._current_utc_datetime = value - def set_custom_status(self, custom_status: str) -> None: - self._custom_status = custom_status + def set_custom_status(self, custom_status: Any) -> None: + self._encoded_custom_status = shared.to_json(custom_status) if custom_status is not None else None def create_timer(self, fire_at: Union[datetime, timedelta]) -> task.Task: return self.create_timer_internal(fire_at) @@ -462,12 +464,12 @@ def continue_as_new(self, new_input, *, save_events: bool = False) -> None: class ExecutionResults: - actions: List[pb.OrchestratorAction] - custom_status: str + actions: list[pb.OrchestratorAction] + encoded_custom_status: Optional[str] - def __init__(self, actions: List[pb.OrchestratorAction], custom_status: str): + def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str]): self.actions = actions - self.custom_status = custom_status + 
self.encoded_custom_status = encoded_custom_status class _OrchestrationExecutor: _generator: Optional[task.Orchestrator] = None @@ -476,7 +478,7 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._registry = registry self._logger = logger self._is_suspended = False - self._suspended_events: List[pb.HistoryEvent] = [] + self._suspended_events: list[pb.HistoryEvent] = [] def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_events: Sequence[pb.HistoryEvent]) -> ExecutionResults: if not new_events: @@ -513,7 +515,7 @@ def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_e actions = ctx.get_actions() if self._logger.level <= logging.DEBUG: self._logger.debug(f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}") - return ExecutionResults(actions=actions, custom_status=ctx._custom_status) + return ExecutionResults(actions=actions, encoded_custom_status=ctx._encoded_custom_status) def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent) -> None: if self._is_suspended and _is_suspendable(event): @@ -829,7 +831,7 @@ def _get_new_event_summary(new_events: Sequence[pb.HistoryEvent]) -> str: elif len(new_events) == 1: return f"[{new_events[0].WhichOneof('eventType')}]" else: - counts: Dict[str, int] = {} + counts: dict[str, int] = {} for event in new_events: event_type = event.WhichOneof('eventType') counts[event_type] = counts.get(event_type, 0) + 1 @@ -843,7 +845,7 @@ def _get_action_summary(new_actions: Sequence[pb.OrchestratorAction]) -> str: elif len(new_actions) == 1: return f"[{new_actions[0].WhichOneof('orchestratorActionType')}]" else: - counts: Dict[str, int] = {} + counts: dict[str, int] = {} for action in new_actions: action_type = action.WhichOneof('orchestratorActionType') counts[action_type] = counts.get(action_type, 0) + 1 diff --git a/examples/fanout_fanin.py b/examples/fanout_fanin.py index 3e054df..c53744f 100644 --- 
a/examples/fanout_fanin.py +++ b/examples/fanout_fanin.py @@ -3,12 +3,11 @@ to complete, and prints an aggregate summary of the outputs.""" import random import time -from typing import List from durabletask import client, task, worker -def get_work_items(ctx: task.ActivityContext, _) -> List[str]: +def get_work_items(ctx: task.ActivityContext, _) -> list[str]: """Activity function that returns a list of work items""" # return a random number of work items count = random.randint(2, 10) @@ -32,11 +31,11 @@ def orchestrator(ctx: task.OrchestrationContext, _): activity functions in parallel, waits for them all to complete, and prints an aggregate summary of the outputs""" - work_items: List[str] = yield ctx.call_activity(get_work_items) + work_items: list[str] = yield ctx.call_activity(get_work_items) # execute the work-items in parallel and wait for them all to return tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items] - results: List[int] = yield task.when_all(tasks) + results: list[int] = yield task.when_all(tasks) # return an aggregate summary of the results return { diff --git a/pyproject.toml b/pyproject.toml index d57957d..577824b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask" -version = "0.1.1-alpha.1" +version = "0.2b1" description = "A Durable Task Client SDK for Python" keywords = [ "durable", @@ -21,7 +21,7 @@ classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", ] -requires-python = ">=3.8" +requires-python = ">=3.9" license = {file = "LICENSE"} readme = "README.md" dependencies = [ diff --git a/requirements.txt b/requirements.txt index 641cee7..af76d88 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ autopep8 grpcio grpcio-tools +protobuf pytest pytest-cov \ No newline at end of file diff --git a/tests/test_activity_executor.py b/tests/test_activity_executor.py index 
b9a4bd4..bfc8eaf 100644 --- a/tests/test_activity_executor.py +++ b/tests/test_activity_executor.py @@ -3,7 +3,7 @@ import json import logging -from typing import Any, Tuple, Union +from typing import Any, Optional, Tuple from durabletask import task, worker @@ -40,7 +40,7 @@ def test_activity(ctx: task.ActivityContext, _): executor, _ = _get_activity_executor(test_activity) - caught_exception: Union[Exception, None] = None + caught_exception: Optional[Exception] = None try: executor.execute(TEST_INSTANCE_ID, "Bogus", TEST_TASK_ID, None) except Exception as ex: diff --git a/tests/test_orchestration_e2e.py b/tests/test_orchestration_e2e.py index 1cfc520..d3d7f0b 100644 --- a/tests/test_orchestration_e2e.py +++ b/tests/test_orchestration_e2e.py @@ -466,4 +466,4 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.COMPLETED assert state.serialized_input is None assert state.serialized_output is None - assert state.serialized_custom_status is "\"foobaz\"" + assert state.serialized_custom_status == "\"foobaz\"" diff --git a/tests/test_orchestration_executor.py b/tests/test_orchestration_executor.py index 95eab0b..cb77c81 100644 --- a/tests/test_orchestration_executor.py +++ b/tests/test_orchestration_executor.py @@ -4,7 +4,6 @@ import json import logging from datetime import datetime, timedelta -from typing import List import pytest @@ -1184,7 +1183,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert str(ex) in complete_action.failureDetails.errorMessage -def get_and_validate_single_complete_orchestration_action(actions: List[pb.OrchestratorAction]) -> pb.CompleteOrchestrationAction: +def get_and_validate_single_complete_orchestration_action(actions: list[pb.OrchestratorAction]) -> pb.CompleteOrchestrationAction: assert len(actions) == 1 assert type(actions[0]) is pb.OrchestratorAction assert actions[0].HasField("completeOrchestration") From 4a303cb5f0ae14ea00c48ccb556d99ca096a5d71 Mon Sep 
17 00:00:00 2001 From: Bernd Verst Date: Wed, 8 Jan 2025 14:51:24 -0800 Subject: [PATCH 02/20] Downgrade required `grpcio` and `protobuf` versions (#36) Signed-off-by: Albert Callarisa --- CHANGELOG.md | 4 + Makefile | 5 +- README.md | 1 + dev-requirements.txt | 1 + durabletask/internal/__init__.py | 0 .../internal/orchestrator_service_pb2.py | 386 +++++----- .../internal/orchestrator_service_pb2_grpc.py | 673 ++++++------------ requirements.txt | 5 +- 8 files changed, 414 insertions(+), 661 deletions(-) create mode 100644 dev-requirements.txt delete mode 100644 durabletask/internal/__init__.py diff --git a/CHANGELOG.md b/CHANGELOG.md index fc4b3d2..a09078d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) +### Changes + +- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries. + ### Updates - Updated `durabletask-protobuf` submodule reference to latest diff --git a/Makefile b/Makefile index 16b883e..68a9b89 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,8 @@ install: python3 -m pip install . gen-proto: -# NOTE: There is currently a hand-edit that we make to the generated orchestrator_service_pb2.py file after it's generated to help resolve import problems. 
- python3 -m grpc_tools.protoc --proto_path=./submodules/durabletask-protobuf/protos --python_out=./durabletask/internal --pyi_out=./durabletask/internal --grpc_python_out=./durabletask/internal orchestrator_service.proto + cp ./submodules/durabletask-protobuf/protos/orchestrator_service.proto durabletask/internal/orchestrator_service.proto + python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. ./durabletask/internal/orchestrator_service.proto + rm durabletask/internal/*.proto .PHONY: init test-unit test-e2e gen-proto install diff --git a/README.md b/README.md index 22b3c44..81b5a54 100644 --- a/README.md +++ b/README.md @@ -170,6 +170,7 @@ git submodule update --init Once the submodule is available, the corresponding source code can be regenerated using the following command from the project root: ```sh +pip3 install -r dev-requirements.txt make gen-proto ``` diff --git a/dev-requirements.txt b/dev-requirements.txt new file mode 100644 index 0000000..119f072 --- /dev/null +++ b/dev-requirements.txt @@ -0,0 +1 @@ +grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 1.26.X is used which has breaking changes for Python diff --git a/durabletask/internal/__init__.py b/durabletask/internal/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py index 6ee3bbb..9c92eac 100644 --- a/durabletask/internal/orchestrator_service_pb2.py +++ b/durabletask/internal/orchestrator_service_pb2.py @@ -1,22 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
-# NO CHECKED-IN PROTOBUF GENCODE -# source: orchestrator_service.proto -# Protobuf Python Version: 5.27.2 +# source: durabletask/internal/orchestrator_service.proto +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 5, - 27, - 2, - '', - 'orchestrator_service.proto' -) # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -28,196 +18,196 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1aorchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 
\x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 
\x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 
\x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 
\x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 
\x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\
x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 
\x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 
\x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 
\x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 
\x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 
\x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 
\x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 
\x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 
\x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 
\n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43lean
EntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'orchestrator_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'durabletask.internal.orchestrator_service_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + _globals['DESCRIPTOR']._options = None _globals['DESCRIPTOR']._serialized_options = b'\n1com.microsoft.durabletask.implementation.protobufZ\020/internal/protos\252\002\036Microsoft.DurableTask.Protobuf' - _globals['_TRACECONTEXT'].fields_by_name['spanID']._loaded_options = None + _globals['_TRACECONTEXT'].fields_by_name['spanID']._options = None _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._loaded_options = None + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._options = None _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=12076 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=12385 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12387 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12452 - _globals['_ORCHESTRATIONINSTANCE']._serialized_start=156 - _globals['_ORCHESTRATIONINSTANCE']._serialized_end=250 - _globals['_ACTIVITYREQUEST']._serialized_start=253 - _globals['_ACTIVITYREQUEST']._serialized_end=490 - _globals['_ACTIVITYRESPONSE']._serialized_start=493 - _globals['_ACTIVITYRESPONSE']._serialized_end=638 - _globals['_TASKFAILUREDETAILS']._serialized_start=641 - _globals['_TASKFAILUREDETAILS']._serialized_end=819 - 
_globals['_PARENTINSTANCEINFO']._serialized_start=822 - _globals['_PARENTINSTANCEINFO']._serialized_end=1013 - _globals['_TRACECONTEXT']._serialized_start=1015 - _globals['_TRACECONTEXT']._serialized_end=1120 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1123 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1515 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1518 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1685 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1687 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1775 - _globals['_TASKSCHEDULEDEVENT']._serialized_start=1778 - _globals['_TASKSCHEDULEDEVENT']._serialized_end=1947 - _globals['_TASKCOMPLETEDEVENT']._serialized_start=1949 - _globals['_TASKCOMPLETEDEVENT']._serialized_end=2040 - _globals['_TASKFAILEDEVENT']._serialized_start=2042 - _globals['_TASKFAILEDEVENT']._serialized_end=2129 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2132 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2339 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2341 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2452 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2454 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2561 - _globals['_TIMERCREATEDEVENT']._serialized_start=2563 - _globals['_TIMERCREATEDEVENT']._serialized_end=2626 - _globals['_TIMERFIREDEVENT']._serialized_start=2628 - _globals['_TIMERFIREDEVENT']._serialized_end=2706 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2708 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2734 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2736 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2764 - _globals['_EVENTSENTEVENT']._serialized_start=2766 - _globals['_EVENTSENTEVENT']._serialized_end=2861 - _globals['_EVENTRAISEDEVENT']._serialized_start=2863 - 
_globals['_EVENTRAISEDEVENT']._serialized_end=2940 - _globals['_GENERICEVENT']._serialized_start=2942 - _globals['_GENERICEVENT']._serialized_end=3000 - _globals['_HISTORYSTATEEVENT']._serialized_start=3002 - _globals['_HISTORYSTATEEVENT']._serialized_end=3070 - _globals['_CONTINUEASNEWEVENT']._serialized_start=3072 - _globals['_CONTINUEASNEWEVENT']._serialized_end=3137 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3139 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3209 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3211 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3279 - _globals['_HISTORYEVENT']._serialized_start=3282 - _globals['_HISTORYEVENT']._serialized_end=4440 - _globals['_SCHEDULETASKACTION']._serialized_start=4442 - _globals['_SCHEDULETASKACTION']._serialized_end=4568 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4571 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4727 - _globals['_CREATETIMERACTION']._serialized_start=4729 - _globals['_CREATETIMERACTION']._serialized_end=4792 - _globals['_SENDEVENTACTION']._serialized_start=4794 - _globals['_SENDEVENTACTION']._serialized_end=4911 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4914 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5222 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5224 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5337 - _globals['_ORCHESTRATORACTION']._serialized_start=5340 - _globals['_ORCHESTRATORACTION']._serialized_end=5718 - _globals['_ORCHESTRATORREQUEST']._serialized_start=5721 - _globals['_ORCHESTRATORREQUEST']._serialized_end=5939 - _globals['_ORCHESTRATORRESPONSE']._serialized_start=5942 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=6074 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=6077 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=6496 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6453 - 
_globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6496 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6498 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6617 - _globals['_CREATEINSTANCERESPONSE']._serialized_start=6619 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=6663 - _globals['_GETINSTANCEREQUEST']._serialized_start=6665 - _globals['_GETINSTANCEREQUEST']._serialized_end=6734 - _globals['_GETINSTANCERESPONSE']._serialized_start=6736 - _globals['_GETINSTANCERESPONSE']._serialized_end=6822 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=6824 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=6913 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=6915 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=6939 - _globals['_ORCHESTRATIONSTATE']._serialized_start=6942 - _globals['_ORCHESTRATIONSTATE']._serialized_end=7618 - _globals['_RAISEEVENTREQUEST']._serialized_start=7620 - _globals['_RAISEEVENTREQUEST']._serialized_end=7718 - _globals['_RAISEEVENTRESPONSE']._serialized_start=7720 - _globals['_RAISEEVENTRESPONSE']._serialized_end=7740 - _globals['_TERMINATEREQUEST']._serialized_start=7742 - _globals['_TERMINATEREQUEST']._serialized_end=7845 - _globals['_TERMINATERESPONSE']._serialized_start=7847 - _globals['_TERMINATERESPONSE']._serialized_end=7866 - _globals['_SUSPENDREQUEST']._serialized_start=7868 - _globals['_SUSPENDREQUEST']._serialized_end=7950 - _globals['_SUSPENDRESPONSE']._serialized_start=7952 - _globals['_SUSPENDRESPONSE']._serialized_end=7969 - _globals['_RESUMEREQUEST']._serialized_start=7971 - _globals['_RESUMEREQUEST']._serialized_end=8052 - _globals['_RESUMERESPONSE']._serialized_start=8054 - _globals['_RESUMERESPONSE']._serialized_end=8070 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=8072 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=8126 - _globals['_INSTANCEQUERY']._serialized_start=8129 - _globals['_INSTANCEQUERY']._serialized_end=8515 - 
_globals['_QUERYINSTANCESRESPONSE']._serialized_start=8518 - _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8648 - _globals['_PURGEINSTANCESREQUEST']._serialized_start=8651 - _globals['_PURGEINSTANCESREQUEST']._serialized_end=8779 - _globals['_PURGEINSTANCEFILTER']._serialized_start=8782 - _globals['_PURGEINSTANCEFILTER']._serialized_end=8952 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8954 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9008 - _globals['_CREATETASKHUBREQUEST']._serialized_start=9010 - _globals['_CREATETASKHUBREQUEST']._serialized_end=9058 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=9060 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=9083 - _globals['_DELETETASKHUBREQUEST']._serialized_start=9085 - _globals['_DELETETASKHUBREQUEST']._serialized_end=9107 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=9109 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=9132 - _globals['_SIGNALENTITYREQUEST']._serialized_start=9135 - _globals['_SIGNALENTITYREQUEST']._serialized_end=9305 - _globals['_SIGNALENTITYRESPONSE']._serialized_start=9307 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=9329 - _globals['_GETENTITYREQUEST']._serialized_start=9331 - _globals['_GETENTITYREQUEST']._serialized_end=9391 - _globals['_GETENTITYRESPONSE']._serialized_start=9393 - _globals['_GETENTITYRESPONSE']._serialized_end=9461 - _globals['_ENTITYQUERY']._serialized_start=9464 - _globals['_ENTITYQUERY']._serialized_end=9795 - _globals['_QUERYENTITIESREQUEST']._serialized_start=9797 - _globals['_QUERYENTITIESREQUEST']._serialized_end=9848 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=9850 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=9965 - _globals['_ENTITYMETADATA']._serialized_start=9968 - _globals['_ENTITYMETADATA']._serialized_end=10187 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10190 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10333 - 
_globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10336 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10482 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10484 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10577 - _globals['_ENTITYBATCHREQUEST']._serialized_start=10580 - _globals['_ENTITYBATCHREQUEST']._serialized_end=10710 - _globals['_ENTITYBATCHRESULT']._serialized_start=10713 - _globals['_ENTITYBATCHRESULT']._serialized_end=10898 - _globals['_OPERATIONREQUEST']._serialized_start=10900 - _globals['_OPERATIONREQUEST']._serialized_end=11001 - _globals['_OPERATIONRESULT']._serialized_start=11003 - _globals['_OPERATIONRESULT']._serialized_end=11122 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11124 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11194 - _globals['_OPERATIONRESULTFAILURE']._serialized_start=11196 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=11265 - _globals['_OPERATIONACTION']._serialized_start=11268 - _globals['_OPERATIONACTION']._serialized_end=11424 - _globals['_SENDSIGNALACTION']._serialized_start=11427 - _globals['_SENDSIGNALACTION']._serialized_end=11575 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11578 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11784 - _globals['_GETWORKITEMSREQUEST']._serialized_start=11786 - _globals['_GETWORKITEMSREQUEST']._serialized_end=11807 - _globals['_WORKITEM']._serialized_start=11810 - _globals['_WORKITEM']._serialized_end=12035 - _globals['_COMPLETETASKRESPONSE']._serialized_start=12037 - _globals['_COMPLETETASKRESPONSE']._serialized_end=12059 - _globals['_HEALTHPING']._serialized_start=12061 - _globals['_HEALTHPING']._serialized_end=12073 - _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12455 - _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13859 + _globals['_ORCHESTRATIONSTATUS']._serialized_start=12097 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=12406 + 
_globals['_CREATEORCHESTRATIONACTION']._serialized_start=12408 + _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12473 + _globals['_ORCHESTRATIONINSTANCE']._serialized_start=177 + _globals['_ORCHESTRATIONINSTANCE']._serialized_end=271 + _globals['_ACTIVITYREQUEST']._serialized_start=274 + _globals['_ACTIVITYREQUEST']._serialized_end=511 + _globals['_ACTIVITYRESPONSE']._serialized_start=514 + _globals['_ACTIVITYRESPONSE']._serialized_end=659 + _globals['_TASKFAILUREDETAILS']._serialized_start=662 + _globals['_TASKFAILUREDETAILS']._serialized_end=840 + _globals['_PARENTINSTANCEINFO']._serialized_start=843 + _globals['_PARENTINSTANCEINFO']._serialized_end=1034 + _globals['_TRACECONTEXT']._serialized_start=1036 + _globals['_TRACECONTEXT']._serialized_end=1141 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1144 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1536 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1539 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1706 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1708 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1796 + _globals['_TASKSCHEDULEDEVENT']._serialized_start=1799 + _globals['_TASKSCHEDULEDEVENT']._serialized_end=1968 + _globals['_TASKCOMPLETEDEVENT']._serialized_start=1970 + _globals['_TASKCOMPLETEDEVENT']._serialized_end=2061 + _globals['_TASKFAILEDEVENT']._serialized_start=2063 + _globals['_TASKFAILEDEVENT']._serialized_end=2150 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2153 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2360 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2362 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2473 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2475 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2582 + _globals['_TIMERCREATEDEVENT']._serialized_start=2584 + 
_globals['_TIMERCREATEDEVENT']._serialized_end=2647 + _globals['_TIMERFIREDEVENT']._serialized_start=2649 + _globals['_TIMERFIREDEVENT']._serialized_end=2727 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2729 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2755 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2757 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2785 + _globals['_EVENTSENTEVENT']._serialized_start=2787 + _globals['_EVENTSENTEVENT']._serialized_end=2882 + _globals['_EVENTRAISEDEVENT']._serialized_start=2884 + _globals['_EVENTRAISEDEVENT']._serialized_end=2961 + _globals['_GENERICEVENT']._serialized_start=2963 + _globals['_GENERICEVENT']._serialized_end=3021 + _globals['_HISTORYSTATEEVENT']._serialized_start=3023 + _globals['_HISTORYSTATEEVENT']._serialized_end=3091 + _globals['_CONTINUEASNEWEVENT']._serialized_start=3093 + _globals['_CONTINUEASNEWEVENT']._serialized_end=3158 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3160 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3230 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3232 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3300 + _globals['_HISTORYEVENT']._serialized_start=3303 + _globals['_HISTORYEVENT']._serialized_end=4461 + _globals['_SCHEDULETASKACTION']._serialized_start=4463 + _globals['_SCHEDULETASKACTION']._serialized_end=4589 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4592 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4748 + _globals['_CREATETIMERACTION']._serialized_start=4750 + _globals['_CREATETIMERACTION']._serialized_end=4813 + _globals['_SENDEVENTACTION']._serialized_start=4815 + _globals['_SENDEVENTACTION']._serialized_end=4932 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4935 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5243 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5245 + 
_globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5358 + _globals['_ORCHESTRATORACTION']._serialized_start=5361 + _globals['_ORCHESTRATORACTION']._serialized_end=5739 + _globals['_ORCHESTRATORREQUEST']._serialized_start=5742 + _globals['_ORCHESTRATORREQUEST']._serialized_end=5960 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=5963 + _globals['_ORCHESTRATORRESPONSE']._serialized_end=6095 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=6098 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=6517 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6474 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6517 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6519 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6638 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=6640 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=6684 + _globals['_GETINSTANCEREQUEST']._serialized_start=6686 + _globals['_GETINSTANCEREQUEST']._serialized_end=6755 + _globals['_GETINSTANCERESPONSE']._serialized_start=6757 + _globals['_GETINSTANCERESPONSE']._serialized_end=6843 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=6845 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=6934 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=6936 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=6960 + _globals['_ORCHESTRATIONSTATE']._serialized_start=6963 + _globals['_ORCHESTRATIONSTATE']._serialized_end=7639 + _globals['_RAISEEVENTREQUEST']._serialized_start=7641 + _globals['_RAISEEVENTREQUEST']._serialized_end=7739 + _globals['_RAISEEVENTRESPONSE']._serialized_start=7741 + _globals['_RAISEEVENTRESPONSE']._serialized_end=7761 + _globals['_TERMINATEREQUEST']._serialized_start=7763 + _globals['_TERMINATEREQUEST']._serialized_end=7866 + _globals['_TERMINATERESPONSE']._serialized_start=7868 + _globals['_TERMINATERESPONSE']._serialized_end=7887 + _globals['_SUSPENDREQUEST']._serialized_start=7889 + 
_globals['_SUSPENDREQUEST']._serialized_end=7971 + _globals['_SUSPENDRESPONSE']._serialized_start=7973 + _globals['_SUSPENDRESPONSE']._serialized_end=7990 + _globals['_RESUMEREQUEST']._serialized_start=7992 + _globals['_RESUMEREQUEST']._serialized_end=8073 + _globals['_RESUMERESPONSE']._serialized_start=8075 + _globals['_RESUMERESPONSE']._serialized_end=8091 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=8093 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=8147 + _globals['_INSTANCEQUERY']._serialized_start=8150 + _globals['_INSTANCEQUERY']._serialized_end=8536 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8539 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8669 + _globals['_PURGEINSTANCESREQUEST']._serialized_start=8672 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=8800 + _globals['_PURGEINSTANCEFILTER']._serialized_start=8803 + _globals['_PURGEINSTANCEFILTER']._serialized_end=8973 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8975 + _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9029 + _globals['_CREATETASKHUBREQUEST']._serialized_start=9031 + _globals['_CREATETASKHUBREQUEST']._serialized_end=9079 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=9081 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=9104 + _globals['_DELETETASKHUBREQUEST']._serialized_start=9106 + _globals['_DELETETASKHUBREQUEST']._serialized_end=9128 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=9130 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=9153 + _globals['_SIGNALENTITYREQUEST']._serialized_start=9156 + _globals['_SIGNALENTITYREQUEST']._serialized_end=9326 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=9328 + _globals['_SIGNALENTITYRESPONSE']._serialized_end=9350 + _globals['_GETENTITYREQUEST']._serialized_start=9352 + _globals['_GETENTITYREQUEST']._serialized_end=9412 + _globals['_GETENTITYRESPONSE']._serialized_start=9414 + _globals['_GETENTITYRESPONSE']._serialized_end=9482 + 
_globals['_ENTITYQUERY']._serialized_start=9485 + _globals['_ENTITYQUERY']._serialized_end=9816 + _globals['_QUERYENTITIESREQUEST']._serialized_start=9818 + _globals['_QUERYENTITIESREQUEST']._serialized_end=9869 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=9871 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=9986 + _globals['_ENTITYMETADATA']._serialized_start=9989 + _globals['_ENTITYMETADATA']._serialized_end=10208 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10211 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10354 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10357 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10503 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10505 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10598 + _globals['_ENTITYBATCHREQUEST']._serialized_start=10601 + _globals['_ENTITYBATCHREQUEST']._serialized_end=10731 + _globals['_ENTITYBATCHRESULT']._serialized_start=10734 + _globals['_ENTITYBATCHRESULT']._serialized_end=10919 + _globals['_OPERATIONREQUEST']._serialized_start=10921 + _globals['_OPERATIONREQUEST']._serialized_end=11022 + _globals['_OPERATIONRESULT']._serialized_start=11024 + _globals['_OPERATIONRESULT']._serialized_end=11143 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11145 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11215 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=11217 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=11286 + _globals['_OPERATIONACTION']._serialized_start=11289 + _globals['_OPERATIONACTION']._serialized_end=11445 + _globals['_SENDSIGNALACTION']._serialized_start=11448 + _globals['_SENDSIGNALACTION']._serialized_end=11596 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11599 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11805 + _globals['_GETWORKITEMSREQUEST']._serialized_start=11807 + _globals['_GETWORKITEMSREQUEST']._serialized_end=11828 + 
_globals['_WORKITEM']._serialized_start=11831 + _globals['_WORKITEM']._serialized_end=12056 + _globals['_COMPLETETASKRESPONSE']._serialized_start=12058 + _globals['_COMPLETETASKRESPONSE']._serialized_end=12080 + _globals['_HEALTHPING']._serialized_start=12082 + _globals['_HEALTHPING']._serialized_end=12094 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12476 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13880 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2_grpc.py b/durabletask/internal/orchestrator_service_pb2_grpc.py index f11cf4b..3638bf6 100644 --- a/durabletask/internal/orchestrator_service_pb2_grpc.py +++ b/durabletask/internal/orchestrator_service_pb2_grpc.py @@ -1,32 +1,10 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc -import warnings +from durabletask.internal import orchestrator_service_pb2 as durabletask_dot_internal_dot_orchestrator__service__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -# TODO: This is a manual edit. Need to figure out how to not manually edit this file. -import durabletask.internal.orchestrator_service_pb2 as orchestrator__service__pb2 - -GRPC_GENERATED_VERSION = '1.67.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in orchestrator_service_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' - + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
- ) - class TaskHubSidecarServiceStub(object): """Missing associated documentation comment in .proto file.""" @@ -41,112 +19,112 @@ def __init__(self, channel): '/TaskHubSidecarService/Hello', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - _registered_method=True) + ) self.StartInstance = channel.unary_unary( '/TaskHubSidecarService/StartInstance', - request_serializer=orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CreateInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, + ) self.GetInstance = channel.unary_unary( '/TaskHubSidecarService/GetInstance', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + ) self.RewindInstance = channel.unary_unary( '/TaskHubSidecarService/RewindInstance', - request_serializer=orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.RewindInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, + ) self.WaitForInstanceStart = channel.unary_unary( 
'/TaskHubSidecarService/WaitForInstanceStart', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + ) self.WaitForInstanceCompletion = channel.unary_unary( '/TaskHubSidecarService/WaitForInstanceCompletion', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + ) self.RaiseEvent = channel.unary_unary( '/TaskHubSidecarService/RaiseEvent', - request_serializer=orchestrator__service__pb2.RaiseEventRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.RaiseEventResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, + ) self.TerminateInstance = channel.unary_unary( '/TaskHubSidecarService/TerminateInstance', - request_serializer=orchestrator__service__pb2.TerminateRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.TerminateResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, + 
response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, + ) self.SuspendInstance = channel.unary_unary( '/TaskHubSidecarService/SuspendInstance', - request_serializer=orchestrator__service__pb2.SuspendRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.SuspendResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, + ) self.ResumeInstance = channel.unary_unary( '/TaskHubSidecarService/ResumeInstance', - request_serializer=orchestrator__service__pb2.ResumeRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.ResumeResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, + ) self.QueryInstances = channel.unary_unary( '/TaskHubSidecarService/QueryInstances', - request_serializer=orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.QueryInstancesResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, + ) self.PurgeInstances = channel.unary_unary( '/TaskHubSidecarService/PurgeInstances', - request_serializer=orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.PurgeInstancesResponse.FromString, - _registered_method=True) + 
request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, + ) self.GetWorkItems = channel.unary_stream( '/TaskHubSidecarService/GetWorkItems', - request_serializer=orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.WorkItem.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, + ) self.CompleteActivityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteActivityTask', - request_serializer=orchestrator__service__pb2.ActivityResponse.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + ) self.CompleteOrchestratorTask = channel.unary_unary( '/TaskHubSidecarService/CompleteOrchestratorTask', - request_serializer=orchestrator__service__pb2.OrchestratorResponse.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + ) self.CompleteEntityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteEntityTask', - request_serializer=orchestrator__service__pb2.EntityBatchResult.SerializeToString, - 
response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + ) self.CreateTaskHub = channel.unary_unary( '/TaskHubSidecarService/CreateTaskHub', - request_serializer=orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CreateTaskHubResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, + ) self.DeleteTaskHub = channel.unary_unary( '/TaskHubSidecarService/DeleteTaskHub', - request_serializer=orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.DeleteTaskHubResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, + ) self.SignalEntity = channel.unary_unary( '/TaskHubSidecarService/SignalEntity', - request_serializer=orchestrator__service__pb2.SignalEntityRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.SignalEntityResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, + ) self.GetEntity = channel.unary_unary( '/TaskHubSidecarService/GetEntity', - 
request_serializer=orchestrator__service__pb2.GetEntityRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetEntityResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, + ) self.QueryEntities = channel.unary_unary( '/TaskHubSidecarService/QueryEntities', - request_serializer=orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.QueryEntitiesResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, + ) self.CleanEntityStorage = channel.unary_unary( '/TaskHubSidecarService/CleanEntityStorage', - request_serializer=orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CleanEntityStorageResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + ) class TaskHubSidecarServiceServicer(object): @@ -312,114 +290,113 @@ def add_TaskHubSidecarServiceServicer_to_server(servicer, server): ), 'StartInstance': grpc.unary_unary_rpc_method_handler( servicer.StartInstance, - request_deserializer=orchestrator__service__pb2.CreateInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.CreateInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.FromString, + 
response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.SerializeToString, ), 'GetInstance': grpc.unary_unary_rpc_method_handler( servicer.GetInstance, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'RewindInstance': grpc.unary_unary_rpc_method_handler( servicer.RewindInstance, - request_deserializer=orchestrator__service__pb2.RewindInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.RewindInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.SerializeToString, ), 'WaitForInstanceStart': grpc.unary_unary_rpc_method_handler( servicer.WaitForInstanceStart, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'WaitForInstanceCompletion': grpc.unary_unary_rpc_method_handler( servicer.WaitForInstanceCompletion, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + 
response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'RaiseEvent': grpc.unary_unary_rpc_method_handler( servicer.RaiseEvent, - request_deserializer=orchestrator__service__pb2.RaiseEventRequest.FromString, - response_serializer=orchestrator__service__pb2.RaiseEventResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.SerializeToString, ), 'TerminateInstance': grpc.unary_unary_rpc_method_handler( servicer.TerminateInstance, - request_deserializer=orchestrator__service__pb2.TerminateRequest.FromString, - response_serializer=orchestrator__service__pb2.TerminateResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.SerializeToString, ), 'SuspendInstance': grpc.unary_unary_rpc_method_handler( servicer.SuspendInstance, - request_deserializer=orchestrator__service__pb2.SuspendRequest.FromString, - response_serializer=orchestrator__service__pb2.SuspendResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.SerializeToString, ), 'ResumeInstance': grpc.unary_unary_rpc_method_handler( servicer.ResumeInstance, - request_deserializer=orchestrator__service__pb2.ResumeRequest.FromString, - response_serializer=orchestrator__service__pb2.ResumeResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.SerializeToString, ), 
'QueryInstances': grpc.unary_unary_rpc_method_handler( servicer.QueryInstances, - request_deserializer=orchestrator__service__pb2.QueryInstancesRequest.FromString, - response_serializer=orchestrator__service__pb2.QueryInstancesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.SerializeToString, ), 'PurgeInstances': grpc.unary_unary_rpc_method_handler( servicer.PurgeInstances, - request_deserializer=orchestrator__service__pb2.PurgeInstancesRequest.FromString, - response_serializer=orchestrator__service__pb2.PurgeInstancesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.SerializeToString, ), 'GetWorkItems': grpc.unary_stream_rpc_method_handler( servicer.GetWorkItems, - request_deserializer=orchestrator__service__pb2.GetWorkItemsRequest.FromString, - response_serializer=orchestrator__service__pb2.WorkItem.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.SerializeToString, ), 'CompleteActivityTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteActivityTask, - request_deserializer=orchestrator__service__pb2.ActivityResponse.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CompleteOrchestratorTask': grpc.unary_unary_rpc_method_handler( 
servicer.CompleteOrchestratorTask, - request_deserializer=orchestrator__service__pb2.OrchestratorResponse.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CompleteEntityTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteEntityTask, - request_deserializer=orchestrator__service__pb2.EntityBatchResult.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CreateTaskHub': grpc.unary_unary_rpc_method_handler( servicer.CreateTaskHub, - request_deserializer=orchestrator__service__pb2.CreateTaskHubRequest.FromString, - response_serializer=orchestrator__service__pb2.CreateTaskHubResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.SerializeToString, ), 'DeleteTaskHub': grpc.unary_unary_rpc_method_handler( servicer.DeleteTaskHub, - request_deserializer=orchestrator__service__pb2.DeleteTaskHubRequest.FromString, - response_serializer=orchestrator__service__pb2.DeleteTaskHubResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.SerializeToString, ), 'SignalEntity': grpc.unary_unary_rpc_method_handler( servicer.SignalEntity, - 
request_deserializer=orchestrator__service__pb2.SignalEntityRequest.FromString, - response_serializer=orchestrator__service__pb2.SignalEntityResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.SerializeToString, ), 'GetEntity': grpc.unary_unary_rpc_method_handler( servicer.GetEntity, - request_deserializer=orchestrator__service__pb2.GetEntityRequest.FromString, - response_serializer=orchestrator__service__pb2.GetEntityResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.SerializeToString, ), 'QueryEntities': grpc.unary_unary_rpc_method_handler( servicer.QueryEntities, - request_deserializer=orchestrator__service__pb2.QueryEntitiesRequest.FromString, - response_serializer=orchestrator__service__pb2.QueryEntitiesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.SerializeToString, ), 'CleanEntityStorage': grpc.unary_unary_rpc_method_handler( servicer.CleanEntityStorage, - request_deserializer=orchestrator__service__pb2.CleanEntityStorageRequest.FromString, - response_serializer=orchestrator__service__pb2.CleanEntityStorageResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'TaskHubSidecarService', rpc_method_handlers) 
server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('TaskHubSidecarService', rpc_method_handlers) # This class is part of an EXPERIMENTAL API. @@ -437,21 +414,11 @@ def Hello(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/Hello', + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/Hello', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def StartInstance(request, @@ -464,21 +431,11 @@ def StartInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/StartInstance', - orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, - orchestrator__service__pb2.CreateInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/StartInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetInstance(request, @@ -491,21 +448,11 @@ def GetInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/GetInstance', - 
orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/GetInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def RewindInstance(request, @@ -518,21 +465,11 @@ def RewindInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/RewindInstance', - orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, - orchestrator__service__pb2.RewindInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/RewindInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def WaitForInstanceStart(request, @@ -545,21 +482,11 @@ def WaitForInstanceStart(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/WaitForInstanceStart', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, - options, - 
channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/WaitForInstanceStart', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def WaitForInstanceCompletion(request, @@ -572,21 +499,11 @@ def WaitForInstanceCompletion(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/WaitForInstanceCompletion', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/WaitForInstanceCompletion', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def RaiseEvent(request, @@ -599,21 +516,11 @@ def RaiseEvent(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/RaiseEvent', - orchestrator__service__pb2.RaiseEventRequest.SerializeToString, - orchestrator__service__pb2.RaiseEventResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - 
_registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/RaiseEvent', + durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def TerminateInstance(request, @@ -626,21 +533,11 @@ def TerminateInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/TerminateInstance', - orchestrator__service__pb2.TerminateRequest.SerializeToString, - orchestrator__service__pb2.TerminateResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/TerminateInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def SuspendInstance(request, @@ -653,21 +550,11 @@ def SuspendInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/SuspendInstance', - orchestrator__service__pb2.SuspendRequest.SerializeToString, - orchestrator__service__pb2.SuspendResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/SuspendInstance', + 
durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ResumeInstance(request, @@ -680,21 +567,11 @@ def ResumeInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/ResumeInstance', - orchestrator__service__pb2.ResumeRequest.SerializeToString, - orchestrator__service__pb2.ResumeResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/ResumeInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def QueryInstances(request, @@ -707,21 +584,11 @@ def QueryInstances(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/QueryInstances', - orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, - orchestrator__service__pb2.QueryInstancesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/QueryInstances', + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, + 
durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def PurgeInstances(request, @@ -734,21 +601,11 @@ def PurgeInstances(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/PurgeInstances', - orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, - orchestrator__service__pb2.PurgeInstancesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/PurgeInstances', + durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetWorkItems(request, @@ -761,21 +618,11 @@ def GetWorkItems(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/TaskHubSidecarService/GetWorkItems', - orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, - orchestrator__service__pb2.WorkItem.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_stream(request, target, '/TaskHubSidecarService/GetWorkItems', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, + options, channel_credentials, + insecure, call_credentials, compression, 
wait_for_ready, timeout, metadata) @staticmethod def CompleteActivityTask(request, @@ -788,21 +635,11 @@ def CompleteActivityTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CompleteActivityTask', - orchestrator__service__pb2.ActivityResponse.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteActivityTask', + durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CompleteOrchestratorTask(request, @@ -815,21 +652,11 @@ def CompleteOrchestratorTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CompleteOrchestratorTask', - orchestrator__service__pb2.OrchestratorResponse.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteOrchestratorTask', + durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CompleteEntityTask(request, @@ -842,21 
+669,11 @@ def CompleteEntityTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CompleteEntityTask', - orchestrator__service__pb2.EntityBatchResult.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteEntityTask', + durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CreateTaskHub(request, @@ -869,21 +686,11 @@ def CreateTaskHub(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CreateTaskHub', - orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, - orchestrator__service__pb2.CreateTaskHubResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CreateTaskHub', + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteTaskHub(request, @@ -896,21 +703,11 @@ def DeleteTaskHub(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, 
- '/TaskHubSidecarService/DeleteTaskHub', - orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, - orchestrator__service__pb2.DeleteTaskHubResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/DeleteTaskHub', + durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def SignalEntity(request, @@ -923,21 +720,11 @@ def SignalEntity(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/SignalEntity', - orchestrator__service__pb2.SignalEntityRequest.SerializeToString, - orchestrator__service__pb2.SignalEntityResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/SignalEntity', + durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetEntity(request, @@ -950,21 +737,11 @@ def GetEntity(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/GetEntity', - orchestrator__service__pb2.GetEntityRequest.SerializeToString, - orchestrator__service__pb2.GetEntityResponse.FromString, - options, 
- channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/GetEntity', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def QueryEntities(request, @@ -977,21 +754,11 @@ def QueryEntities(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/QueryEntities', - orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, - orchestrator__service__pb2.QueryEntitiesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/QueryEntities', + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CleanEntityStorage(request, @@ -1004,18 +771,8 @@ def CleanEntityStorage(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CleanEntityStorage', - orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, - orchestrator__service__pb2.CleanEntityStorageResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + 
return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CleanEntityStorage', + durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/requirements.txt b/requirements.txt index af76d88..a31419b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ autopep8 -grpcio -grpcio-tools +grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible protobuf pytest -pytest-cov \ No newline at end of file +pytest-cov From 2466e7d1a859a06e3ee26ccb293bbbac03369730 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Fri, 10 Jan 2025 13:01:15 -0800 Subject: [PATCH 03/20] Remove protocol prefix from host name and auto-configure secure mode (#38) Signed-off-by: Albert Callarisa --- CHANGELOG.md | 3 ++- README.md | 2 +- durabletask/internal/shared.py | 17 ++++++++++++ tests/test_client.py | 49 +++++++++++++++++++++++++++++++++- 4 files changed, 68 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a09078d..286312c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changes -- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries. +- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. 
This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries ([#36](https://github.com/microsoft/durabletask-python/pull/36)) - by [@berndverst](https://github.com/berndverst) +- Http and grpc protocols and their secure variants are stripped from the host name parameter if provided. Secure mode is enabled if the protocol provided is https or grpcs ([#38](https://github.com/microsoft/durabletask-python/pull/38) - by [@berndverst)(https://github.com/berndverst) ### Updates diff --git a/README.md b/README.md index 81b5a54..420d75f 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ Orchestrations can specify retry policies for activities and sub-orchestrations. ### Prerequisites -- Python 3.8 +- Python 3.9 - A Durable Task-compatible sidecar, like [Dapr Workflow](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/) ### Installing the Durable Task Python client SDK diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 400529a..c4f3aa4 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -15,6 +15,9 @@ # and should be deserialized as a SimpleNamespace AUTO_SERIALIZED = "__durabletask_autoobject__" +SECURE_PROTOCOLS = ["https://", "grpcs://"] +INSECURE_PROTOCOLS = ["http://", "grpc://"] + def get_default_host_address() -> str: return "localhost:4001" @@ -27,6 +30,20 @@ def get_grpc_channel( if host_address is None: host_address = get_default_host_address() + for protocol in SECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = True + # remove the protocol from the host name + host_address = host_address[len(protocol):] + break + + for protocol in INSECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = False + # remove the protocol from the host name + host_address = host_address[len(protocol):] + break + if secure_channel: channel = 
grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) else: diff --git a/tests/test_client.py b/tests/test_client.py index b27f8e3..caacf65 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,4 +1,4 @@ -from unittest.mock import patch +from unittest.mock import patch, ANY from durabletask.internal.shared import (DefaultClientInterceptorImpl, get_default_host_address, @@ -39,3 +39,50 @@ def test_get_grpc_channel_with_metadata(): assert args[0] == mock_channel.return_value assert isinstance(args[1], DefaultClientInterceptorImpl) assert args[1]._metadata == METADATA + + +def test_grpc_channel_with_host_name_protocol_stripping(): + with patch('grpc.insecure_channel') as mock_insecure_channel, patch( + 'grpc.secure_channel') as mock_secure_channel: + + host_name = "myserver.com:1234" + + prefix = "grpc://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "http://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "HTTP://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "GRPC://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "grpcs://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "https://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "HTTPS://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "GRPCS://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "" + 
get_grpc_channel(prefix + host_name, METADATA, True) + mock_secure_channel.assert_called_with(host_name, ANY) \ No newline at end of file From 551cb02757918e0a4a47d572edd5723bcc20b8c4 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Fri, 17 Jan 2025 09:39:03 -0800 Subject: [PATCH 04/20] Improve Proto Generation: Download proto file directly instead of via submodule (#39) Signed-off-by: Albert Callarisa --- CHANGELOG.md | 1 + Makefile | 3 +- README.md | 10 +- durabletask/internal/PROTO_SOURCE_COMMIT_HASH | 1 + .../internal/orchestrator_service_pb2.py | 352 +++++++++--------- .../internal/orchestrator_service_pb2.pyi | 20 +- submodules/durabletask-protobuf | 1 - 7 files changed, 196 insertions(+), 192 deletions(-) create mode 100644 durabletask/internal/PROTO_SOURCE_COMMIT_HASH delete mode 160000 submodules/durabletask-protobuf diff --git a/CHANGELOG.md b/CHANGELOG.md index 286312c..ee736f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries ([#36](https://github.com/microsoft/durabletask-python/pull/36)) - by [@berndverst](https://github.com/berndverst) - Http and grpc protocols and their secure variants are stripped from the host name parameter if provided. 
Secure mode is enabled if the protocol provided is https or grpcs ([#38](https://github.com/microsoft/durabletask-python/pull/38) - by [@berndverst)(https://github.com/berndverst) +- Improve ProtoGen by downloading proto file directly instead of using submodule ([#39](https://github.com/microsoft/durabletask-python/pull/39) - by [@berndverst](https://github.com/berndverst) ### Updates diff --git a/Makefile b/Makefile index 68a9b89..5a05f33 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,8 @@ install: python3 -m pip install . gen-proto: - cp ./submodules/durabletask-protobuf/protos/orchestrator_service.proto durabletask/internal/orchestrator_service.proto + curl -o durabletask/internal/orchestrator_service.proto https://raw.githubusercontent.com/microsoft/durabletask-protobuf/refs/heads/main/protos/orchestrator_service.proto + curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/microsoft/durabletask-protobuf/commits?path=protos/orchestrator_service.proto&sha=main&per_page=1" | jq -r '.[0].sha' >> durabletask/internal/PROTO_SOURCE_COMMIT_HASH python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. ./durabletask/internal/orchestrator_service.proto rm durabletask/internal/*.proto diff --git a/README.md b/README.md index 420d75f..644635e 100644 --- a/README.md +++ b/README.md @@ -161,19 +161,13 @@ The following is more information about how to develop this project. Note that d ### Generating protobufs -Protobuf definitions are stored in the [./submodules/durabletask-proto](./submodules/durabletask-proto) directory, which is a submodule. 
To update the submodule, run the following command from the project root: - -```sh -git submodule update --init -``` - -Once the submodule is available, the corresponding source code can be regenerated using the following command from the project root: - ```sh pip3 install -r dev-requirements.txt make gen-proto ``` +This will download the `orchestrator_service.proto` from the `microsoft/durabletask-protobuf` repo and compile it using `grpcio-tools`. The version of the source proto file that was downloaded can be found in the file `durabletask/internal/PROTO_SOURCE_COMMIT_HASH`. + ### Running unit tests Unit tests can be run using the following command from the project root. Unit tests _don't_ require a sidecar process to be running. diff --git a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH new file mode 100644 index 0000000..ddbd31a --- /dev/null +++ b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH @@ -0,0 +1 @@ +443b333f4f65a438dc9eb4f090560d232afec4b7 diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py index 9c92eac..44b4a32 100644 --- a/durabletask/internal/orchestrator_service_pb2.py +++ b/durabletask/internal/orchestrator_service_pb2.py @@ -18,7 +18,7 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 
\x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 
\x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 
\x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 
\x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 
\x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 
\x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 
\n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43lean
EntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 
\x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 
\x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 
\x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x9d\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 
\x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 
\x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 
\x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"j\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 
\n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.Delet
eTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -30,184 +30,184 @@ _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._options = None _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=12097 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=12406 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12408 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12473 + _globals['_ORCHESTRATIONSTATUS']._serialized_start=12232 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=12541 + _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12543 + _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12608 _globals['_ORCHESTRATIONINSTANCE']._serialized_start=177 _globals['_ORCHESTRATIONINSTANCE']._serialized_end=271 _globals['_ACTIVITYREQUEST']._serialized_start=274 _globals['_ACTIVITYREQUEST']._serialized_end=511 _globals['_ACTIVITYRESPONSE']._serialized_start=514 - _globals['_ACTIVITYRESPONSE']._serialized_end=659 - _globals['_TASKFAILUREDETAILS']._serialized_start=662 - _globals['_TASKFAILUREDETAILS']._serialized_end=840 - _globals['_PARENTINSTANCEINFO']._serialized_start=843 - _globals['_PARENTINSTANCEINFO']._serialized_end=1034 - _globals['_TRACECONTEXT']._serialized_start=1036 - _globals['_TRACECONTEXT']._serialized_end=1141 - 
_globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1144 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1536 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1539 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1706 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1708 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1796 - _globals['_TASKSCHEDULEDEVENT']._serialized_start=1799 - _globals['_TASKSCHEDULEDEVENT']._serialized_end=1968 - _globals['_TASKCOMPLETEDEVENT']._serialized_start=1970 - _globals['_TASKCOMPLETEDEVENT']._serialized_end=2061 - _globals['_TASKFAILEDEVENT']._serialized_start=2063 - _globals['_TASKFAILEDEVENT']._serialized_end=2150 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2153 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2360 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2362 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2473 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2475 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2582 - _globals['_TIMERCREATEDEVENT']._serialized_start=2584 - _globals['_TIMERCREATEDEVENT']._serialized_end=2647 - _globals['_TIMERFIREDEVENT']._serialized_start=2649 - _globals['_TIMERFIREDEVENT']._serialized_end=2727 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2729 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2755 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2757 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2785 - _globals['_EVENTSENTEVENT']._serialized_start=2787 - _globals['_EVENTSENTEVENT']._serialized_end=2882 - _globals['_EVENTRAISEDEVENT']._serialized_start=2884 - _globals['_EVENTRAISEDEVENT']._serialized_end=2961 - _globals['_GENERICEVENT']._serialized_start=2963 - _globals['_GENERICEVENT']._serialized_end=3021 - _globals['_HISTORYSTATEEVENT']._serialized_start=3023 - 
_globals['_HISTORYSTATEEVENT']._serialized_end=3091 - _globals['_CONTINUEASNEWEVENT']._serialized_start=3093 - _globals['_CONTINUEASNEWEVENT']._serialized_end=3158 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3160 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3230 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3232 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3300 - _globals['_HISTORYEVENT']._serialized_start=3303 - _globals['_HISTORYEVENT']._serialized_end=4461 - _globals['_SCHEDULETASKACTION']._serialized_start=4463 - _globals['_SCHEDULETASKACTION']._serialized_end=4589 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4592 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4748 - _globals['_CREATETIMERACTION']._serialized_start=4750 - _globals['_CREATETIMERACTION']._serialized_end=4813 - _globals['_SENDEVENTACTION']._serialized_start=4815 - _globals['_SENDEVENTACTION']._serialized_end=4932 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4935 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5243 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5245 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5358 - _globals['_ORCHESTRATORACTION']._serialized_start=5361 - _globals['_ORCHESTRATORACTION']._serialized_end=5739 - _globals['_ORCHESTRATORREQUEST']._serialized_start=5742 - _globals['_ORCHESTRATORREQUEST']._serialized_end=5960 - _globals['_ORCHESTRATORRESPONSE']._serialized_start=5963 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=6095 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=6098 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=6517 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6474 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6517 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6519 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6638 - 
_globals['_CREATEINSTANCERESPONSE']._serialized_start=6640 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=6684 - _globals['_GETINSTANCEREQUEST']._serialized_start=6686 - _globals['_GETINSTANCEREQUEST']._serialized_end=6755 - _globals['_GETINSTANCERESPONSE']._serialized_start=6757 - _globals['_GETINSTANCERESPONSE']._serialized_end=6843 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=6845 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=6934 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=6936 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=6960 - _globals['_ORCHESTRATIONSTATE']._serialized_start=6963 - _globals['_ORCHESTRATIONSTATE']._serialized_end=7639 - _globals['_RAISEEVENTREQUEST']._serialized_start=7641 - _globals['_RAISEEVENTREQUEST']._serialized_end=7739 - _globals['_RAISEEVENTRESPONSE']._serialized_start=7741 - _globals['_RAISEEVENTRESPONSE']._serialized_end=7761 - _globals['_TERMINATEREQUEST']._serialized_start=7763 - _globals['_TERMINATEREQUEST']._serialized_end=7866 - _globals['_TERMINATERESPONSE']._serialized_start=7868 - _globals['_TERMINATERESPONSE']._serialized_end=7887 - _globals['_SUSPENDREQUEST']._serialized_start=7889 - _globals['_SUSPENDREQUEST']._serialized_end=7971 - _globals['_SUSPENDRESPONSE']._serialized_start=7973 - _globals['_SUSPENDRESPONSE']._serialized_end=7990 - _globals['_RESUMEREQUEST']._serialized_start=7992 - _globals['_RESUMEREQUEST']._serialized_end=8073 - _globals['_RESUMERESPONSE']._serialized_start=8075 - _globals['_RESUMERESPONSE']._serialized_end=8091 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=8093 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=8147 - _globals['_INSTANCEQUERY']._serialized_start=8150 - _globals['_INSTANCEQUERY']._serialized_end=8536 - _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8539 - _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8669 - _globals['_PURGEINSTANCESREQUEST']._serialized_start=8672 - 
_globals['_PURGEINSTANCESREQUEST']._serialized_end=8800 - _globals['_PURGEINSTANCEFILTER']._serialized_start=8803 - _globals['_PURGEINSTANCEFILTER']._serialized_end=8973 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8975 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9029 - _globals['_CREATETASKHUBREQUEST']._serialized_start=9031 - _globals['_CREATETASKHUBREQUEST']._serialized_end=9079 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=9081 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=9104 - _globals['_DELETETASKHUBREQUEST']._serialized_start=9106 - _globals['_DELETETASKHUBREQUEST']._serialized_end=9128 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=9130 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=9153 - _globals['_SIGNALENTITYREQUEST']._serialized_start=9156 - _globals['_SIGNALENTITYREQUEST']._serialized_end=9326 - _globals['_SIGNALENTITYRESPONSE']._serialized_start=9328 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=9350 - _globals['_GETENTITYREQUEST']._serialized_start=9352 - _globals['_GETENTITYREQUEST']._serialized_end=9412 - _globals['_GETENTITYRESPONSE']._serialized_start=9414 - _globals['_GETENTITYRESPONSE']._serialized_end=9482 - _globals['_ENTITYQUERY']._serialized_start=9485 - _globals['_ENTITYQUERY']._serialized_end=9816 - _globals['_QUERYENTITIESREQUEST']._serialized_start=9818 - _globals['_QUERYENTITIESREQUEST']._serialized_end=9869 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=9871 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=9986 - _globals['_ENTITYMETADATA']._serialized_start=9989 - _globals['_ENTITYMETADATA']._serialized_end=10208 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10211 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10354 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10357 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10503 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10505 - 
_globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10598 - _globals['_ENTITYBATCHREQUEST']._serialized_start=10601 - _globals['_ENTITYBATCHREQUEST']._serialized_end=10731 - _globals['_ENTITYBATCHRESULT']._serialized_start=10734 - _globals['_ENTITYBATCHRESULT']._serialized_end=10919 - _globals['_OPERATIONREQUEST']._serialized_start=10921 - _globals['_OPERATIONREQUEST']._serialized_end=11022 - _globals['_OPERATIONRESULT']._serialized_start=11024 - _globals['_OPERATIONRESULT']._serialized_end=11143 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11145 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11215 - _globals['_OPERATIONRESULTFAILURE']._serialized_start=11217 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=11286 - _globals['_OPERATIONACTION']._serialized_start=11289 - _globals['_OPERATIONACTION']._serialized_end=11445 - _globals['_SENDSIGNALACTION']._serialized_start=11448 - _globals['_SENDSIGNALACTION']._serialized_end=11596 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11599 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11805 - _globals['_GETWORKITEMSREQUEST']._serialized_start=11807 - _globals['_GETWORKITEMSREQUEST']._serialized_end=11828 - _globals['_WORKITEM']._serialized_start=11831 - _globals['_WORKITEM']._serialized_end=12056 - _globals['_COMPLETETASKRESPONSE']._serialized_start=12058 - _globals['_COMPLETETASKRESPONSE']._serialized_end=12080 - _globals['_HEALTHPING']._serialized_start=12082 - _globals['_HEALTHPING']._serialized_end=12094 - _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12476 - _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13880 + _globals['_ACTIVITYRESPONSE']._serialized_end=684 + _globals['_TASKFAILUREDETAILS']._serialized_start=687 + _globals['_TASKFAILUREDETAILS']._serialized_end=865 + _globals['_PARENTINSTANCEINFO']._serialized_start=868 + _globals['_PARENTINSTANCEINFO']._serialized_end=1059 + _globals['_TRACECONTEXT']._serialized_start=1061 + 
_globals['_TRACECONTEXT']._serialized_end=1166 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1169 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1561 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1564 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1731 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1733 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1821 + _globals['_TASKSCHEDULEDEVENT']._serialized_start=1824 + _globals['_TASKSCHEDULEDEVENT']._serialized_end=1993 + _globals['_TASKCOMPLETEDEVENT']._serialized_start=1995 + _globals['_TASKCOMPLETEDEVENT']._serialized_end=2086 + _globals['_TASKFAILEDEVENT']._serialized_start=2088 + _globals['_TASKFAILEDEVENT']._serialized_end=2175 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2178 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2385 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2387 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2498 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2500 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2607 + _globals['_TIMERCREATEDEVENT']._serialized_start=2609 + _globals['_TIMERCREATEDEVENT']._serialized_end=2672 + _globals['_TIMERFIREDEVENT']._serialized_start=2674 + _globals['_TIMERFIREDEVENT']._serialized_end=2752 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2754 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2780 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2782 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2810 + _globals['_EVENTSENTEVENT']._serialized_start=2812 + _globals['_EVENTSENTEVENT']._serialized_end=2907 + _globals['_EVENTRAISEDEVENT']._serialized_start=2909 + _globals['_EVENTRAISEDEVENT']._serialized_end=2986 + _globals['_GENERICEVENT']._serialized_start=2988 + _globals['_GENERICEVENT']._serialized_end=3046 + 
_globals['_HISTORYSTATEEVENT']._serialized_start=3048 + _globals['_HISTORYSTATEEVENT']._serialized_end=3116 + _globals['_CONTINUEASNEWEVENT']._serialized_start=3118 + _globals['_CONTINUEASNEWEVENT']._serialized_end=3183 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3185 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3255 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3257 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3325 + _globals['_HISTORYEVENT']._serialized_start=3328 + _globals['_HISTORYEVENT']._serialized_end=4486 + _globals['_SCHEDULETASKACTION']._serialized_start=4488 + _globals['_SCHEDULETASKACTION']._serialized_end=4614 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4617 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4773 + _globals['_CREATETIMERACTION']._serialized_start=4775 + _globals['_CREATETIMERACTION']._serialized_end=4838 + _globals['_SENDEVENTACTION']._serialized_start=4840 + _globals['_SENDEVENTACTION']._serialized_end=4957 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4960 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5268 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5270 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5383 + _globals['_ORCHESTRATORACTION']._serialized_start=5386 + _globals['_ORCHESTRATORACTION']._serialized_end=5764 + _globals['_ORCHESTRATORREQUEST']._serialized_start=5767 + _globals['_ORCHESTRATORREQUEST']._serialized_end=5985 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=5988 + _globals['_ORCHESTRATORRESPONSE']._serialized_end=6145 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=6148 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=6567 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6524 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6567 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6569 + 
_globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6688 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=6690 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=6734 + _globals['_GETINSTANCEREQUEST']._serialized_start=6736 + _globals['_GETINSTANCEREQUEST']._serialized_end=6805 + _globals['_GETINSTANCERESPONSE']._serialized_start=6807 + _globals['_GETINSTANCERESPONSE']._serialized_end=6893 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=6895 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=6984 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=6986 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=7010 + _globals['_ORCHESTRATIONSTATE']._serialized_start=7013 + _globals['_ORCHESTRATIONSTATE']._serialized_end=7689 + _globals['_RAISEEVENTREQUEST']._serialized_start=7691 + _globals['_RAISEEVENTREQUEST']._serialized_end=7789 + _globals['_RAISEEVENTRESPONSE']._serialized_start=7791 + _globals['_RAISEEVENTRESPONSE']._serialized_end=7811 + _globals['_TERMINATEREQUEST']._serialized_start=7813 + _globals['_TERMINATEREQUEST']._serialized_end=7916 + _globals['_TERMINATERESPONSE']._serialized_start=7918 + _globals['_TERMINATERESPONSE']._serialized_end=7937 + _globals['_SUSPENDREQUEST']._serialized_start=7939 + _globals['_SUSPENDREQUEST']._serialized_end=8021 + _globals['_SUSPENDRESPONSE']._serialized_start=8023 + _globals['_SUSPENDRESPONSE']._serialized_end=8040 + _globals['_RESUMEREQUEST']._serialized_start=8042 + _globals['_RESUMEREQUEST']._serialized_end=8123 + _globals['_RESUMERESPONSE']._serialized_start=8125 + _globals['_RESUMERESPONSE']._serialized_end=8141 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=8143 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=8197 + _globals['_INSTANCEQUERY']._serialized_start=8200 + _globals['_INSTANCEQUERY']._serialized_end=8586 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8589 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8719 + 
_globals['_PURGEINSTANCESREQUEST']._serialized_start=8722 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=8850 + _globals['_PURGEINSTANCEFILTER']._serialized_start=8853 + _globals['_PURGEINSTANCEFILTER']._serialized_end=9023 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=9025 + _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9079 + _globals['_CREATETASKHUBREQUEST']._serialized_start=9081 + _globals['_CREATETASKHUBREQUEST']._serialized_end=9129 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=9131 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=9154 + _globals['_DELETETASKHUBREQUEST']._serialized_start=9156 + _globals['_DELETETASKHUBREQUEST']._serialized_end=9178 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=9180 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=9203 + _globals['_SIGNALENTITYREQUEST']._serialized_start=9206 + _globals['_SIGNALENTITYREQUEST']._serialized_end=9376 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=9378 + _globals['_SIGNALENTITYRESPONSE']._serialized_end=9400 + _globals['_GETENTITYREQUEST']._serialized_start=9402 + _globals['_GETENTITYREQUEST']._serialized_end=9462 + _globals['_GETENTITYRESPONSE']._serialized_start=9464 + _globals['_GETENTITYRESPONSE']._serialized_end=9532 + _globals['_ENTITYQUERY']._serialized_start=9535 + _globals['_ENTITYQUERY']._serialized_end=9866 + _globals['_QUERYENTITIESREQUEST']._serialized_start=9868 + _globals['_QUERYENTITIESREQUEST']._serialized_end=9919 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=9921 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=10036 + _globals['_ENTITYMETADATA']._serialized_start=10039 + _globals['_ENTITYMETADATA']._serialized_end=10258 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10261 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10404 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10407 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10553 + 
_globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10555 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10648 + _globals['_ENTITYBATCHREQUEST']._serialized_start=10651 + _globals['_ENTITYBATCHREQUEST']._serialized_end=10781 + _globals['_ENTITYBATCHRESULT']._serialized_start=10784 + _globals['_ENTITYBATCHRESULT']._serialized_end=10969 + _globals['_OPERATIONREQUEST']._serialized_start=10971 + _globals['_OPERATIONREQUEST']._serialized_end=11072 + _globals['_OPERATIONRESULT']._serialized_start=11074 + _globals['_OPERATIONRESULT']._serialized_end=11193 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11195 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11265 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=11267 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=11336 + _globals['_OPERATIONACTION']._serialized_start=11339 + _globals['_OPERATIONACTION']._serialized_end=11495 + _globals['_SENDSIGNALACTION']._serialized_start=11498 + _globals['_SENDSIGNALACTION']._serialized_end=11646 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11649 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11855 + _globals['_GETWORKITEMSREQUEST']._serialized_start=11857 + _globals['_GETWORKITEMSREQUEST']._serialized_end=11963 + _globals['_WORKITEM']._serialized_start=11966 + _globals['_WORKITEM']._serialized_end=12191 + _globals['_COMPLETETASKRESPONSE']._serialized_start=12193 + _globals['_COMPLETETASKRESPONSE']._serialized_end=12215 + _globals['_HEALTHPING']._serialized_start=12217 + _globals['_HEALTHPING']._serialized_end=12229 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12611 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=14015 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2.pyi b/durabletask/internal/orchestrator_service_pb2.pyi index 82d2e1a..84d2af8 100644 --- a/durabletask/internal/orchestrator_service_pb2.pyi +++ 
b/durabletask/internal/orchestrator_service_pb2.pyi @@ -63,16 +63,18 @@ class ActivityRequest(_message.Message): def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., taskId: _Optional[int] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... class ActivityResponse(_message.Message): - __slots__ = ("instanceId", "taskId", "result", "failureDetails") + __slots__ = ("instanceId", "taskId", "result", "failureDetails", "completionToken") INSTANCEID_FIELD_NUMBER: _ClassVar[int] TASKID_FIELD_NUMBER: _ClassVar[int] RESULT_FIELD_NUMBER: _ClassVar[int] FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] instanceId: str taskId: int result: _wrappers_pb2.StringValue failureDetails: TaskFailureDetails - def __init__(self, instanceId: _Optional[str] = ..., taskId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... + completionToken: str + def __init__(self, instanceId: _Optional[str] = ..., taskId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... 
class TaskFailureDetails(_message.Message): __slots__ = ("errorType", "errorMessage", "stackTrace", "innerFailure", "isNonRetriable") @@ -421,14 +423,16 @@ class OrchestratorRequest(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ...) -> None: ... class OrchestratorResponse(_message.Message): - __slots__ = ("instanceId", "actions", "customStatus") + __slots__ = ("instanceId", "actions", "customStatus", "completionToken") INSTANCEID_FIELD_NUMBER: _ClassVar[int] ACTIONS_FIELD_NUMBER: _ClassVar[int] CUSTOMSTATUS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] instanceId: str actions: _containers.RepeatedCompositeFieldContainer[OrchestratorAction] customStatus: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + completionToken: str + def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... 
class CreateInstanceRequest(_message.Message): __slots__ = ("instanceId", "name", "version", "input", "scheduledStartTimestamp", "orchestrationIdReusePolicy", "executionId", "tags") @@ -856,8 +860,12 @@ class StartNewOrchestrationAction(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... class GetWorkItemsRequest(_message.Message): - __slots__ = () - def __init__(self) -> None: ... + __slots__ = ("maxConcurrentOrchestrationWorkItems", "maxConcurrentActivityWorkItems") + MAXCONCURRENTORCHESTRATIONWORKITEMS_FIELD_NUMBER: _ClassVar[int] + MAXCONCURRENTACTIVITYWORKITEMS_FIELD_NUMBER: _ClassVar[int] + maxConcurrentOrchestrationWorkItems: int + maxConcurrentActivityWorkItems: int + def __init__(self, maxConcurrentOrchestrationWorkItems: _Optional[int] = ..., maxConcurrentActivityWorkItems: _Optional[int] = ...) -> None: ... 
class WorkItem(_message.Message): __slots__ = ("orchestratorRequest", "activityRequest", "entityRequest", "healthPing", "completionToken") diff --git a/submodules/durabletask-protobuf b/submodules/durabletask-protobuf deleted file mode 160000 index c7d8cd8..0000000 --- a/submodules/durabletask-protobuf +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c7d8cd898017342d090ba9531c3f2ec45b8e07e7 From 37544cf157adcb11726f5d0b5319e7c6f57c566f Mon Sep 17 00:00:00 2001 From: wangbill Date: Thu, 23 Jan 2025 11:57:56 -0800 Subject: [PATCH 05/20] remove gitmodule file (#41) Signed-off-by: Albert Callarisa --- .gitmodules | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 .gitmodules diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index b371516..0000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "submodules/durabletask-protobuf"] - path = submodules/durabletask-protobuf - url = https://github.com/microsoft/durabletask-protobuf From 2bdf87f7b0e0218df55873e4e2bb9284c0d40138 Mon Sep 17 00:00:00 2001 From: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> Date: Tue, 18 Feb 2025 15:46:37 -0700 Subject: [PATCH 06/20] Creation of DTS example and passing of completionToken (#40) * Creation of DTS example and passing of completionToken Signed-off-by: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> * Adressing review feedback Signed-off-by: Ryan Lettieri * Reverting dapr readme Signed-off-by: Ryan Lettieri * Adding accessTokenManager class for refreshing credential token Signed-off-by: Ryan Lettieri * Adding comments to the example Signed-off-by: Ryan Lettieri * Adding in requirement for azure-identity Signed-off-by: Ryan Lettieri * Moving dts logic into its own module Signed-off-by: Ryan Lettieri * Fixing whitesapce Signed-off-by: Ryan Lettieri * Updating dts client to refresh token Signed-off-by: Ryan Lettieri * Cleaning up construction of dts objects and improving examples Signed-off-by: Ryan Lettieri * Migrating shared 
access token logic to new grpc class Signed-off-by: Ryan Lettieri * Adding log statements to access_token_manager Signed-off-by: Ryan Lettieri * breaking for loop when setting interceptors Signed-off-by: Ryan Lettieri * Removing changes to client.py and adding additional steps to readme.md Signed-off-by: Ryan Lettieri * Refactoring client and worker to pass around interceptors Signed-off-by: Ryan Lettieri * Fixing import for DefaultClientInterceptorImpl Signed-off-by: Ryan Lettieri * Adressing round 1 of feedback Signed-off-by: Ryan Lettieri * Fixing interceptor issue Signed-off-by: Ryan Lettieri * Moving some files around to remove dependencies Signed-off-by: Ryan Lettieri * Adressing more feedback Signed-off-by: Ryan Lettieri * More review feedback Signed-off-by: Ryan Lettieri * Passing token credential as an argument rather than 2 strings Signed-off-by: Ryan Lettieri * More review feedback for token passing Signed-off-by: Ryan Lettieri * Addressing None comment and using correct metadata Signed-off-by: Ryan Lettieri * Updating unit tests Signed-off-by: Ryan Lettieri * Fixing the type for the unit test Signed-off-by: Ryan Lettieri * Fixing grpc calls Signed-off-by: Ryan Lettieri * Fix linter errors and update documentation * Specifying version reqiuirement for pyproject.toml Signed-off-by: Ryan Lettieri * Updating README Signed-off-by: Ryan Lettieri * Adding comment for credential type Signed-off-by: Ryan Lettieri --------- Signed-off-by: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> Signed-off-by: Ryan Lettieri Co-authored-by: Chris Gillum Signed-off-by: Albert Callarisa --- CHANGELOG.md | 1 + README.md | 7 +- durabletask-azuremanaged/__init__.py | 0 .../durabletask/azuremanaged/__init__.py | 0 .../durabletask/azuremanaged/client.py | 30 ++++++ .../internal/access_token_manager.py | 49 ++++++++++ .../internal/durabletask_grpc_interceptor.py | 41 ++++++++ .../durabletask/azuremanaged/worker.py | 30 ++++++ durabletask-azuremanaged/pyproject.toml | 
41 ++++++++ durabletask/client.py | 26 ++++- durabletask/internal/grpc_interceptor.py | 12 +-- durabletask/internal/shared.py | 22 +++-- durabletask/task.py | 7 +- durabletask/worker.py | 42 +++++--- examples/README.md | 2 +- examples/dts/README.md | 55 +++++++++++ examples/dts/dts_activity_sequence.py | 71 ++++++++++++++ examples/dts/dts_fanout_fanin.py | 96 +++++++++++++++++++ requirements.txt | 2 + tests/test_client.py | 34 +++---- 20 files changed, 514 insertions(+), 54 deletions(-) create mode 100644 durabletask-azuremanaged/__init__.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/__init__.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/client.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/internal/access_token_manager.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/worker.py create mode 100644 durabletask-azuremanaged/pyproject.toml create mode 100644 examples/dts/README.md create mode 100644 examples/dts/dts_activity_sequence.py create mode 100644 examples/dts/dts_fanout_fanin.py diff --git a/CHANGELOG.md b/CHANGELOG.md index ee736f0..13b0e69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) +- Added new `durabletask-azuremanaged` package for use with the [Durable Task Scheduler](https://techcommunity.microsoft.com/blog/appsonazureblog/announcing-limited-early-access-of-the-durable-task-scheduler-for-azure-durable-/4286526) - by 
[@RyanLettieri](https://github.com/RyanLettieri) ### Changes diff --git a/README.md b/README.md index 644635e..87af41d 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,14 @@ -# Durable Task Client SDK for Python +# Durable Task SDK for Python [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Build Validation](https://github.com/microsoft/durabletask-python/actions/workflows/pr-validation.yml/badge.svg)](https://github.com/microsoft/durabletask-python/actions/workflows/pr-validation.yml) [![PyPI version](https://badge.fury.io/py/durabletask.svg)](https://badge.fury.io/py/durabletask) -This repo contains a Python client SDK for use with the [Durable Task Framework for Go](https://github.com/microsoft/durabletask-go) and [Dapr Workflow](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/). With this SDK, you can define, schedule, and manage durable orchestrations using ordinary Python code. +This repo contains a Python SDK for use with the [Azure Durable Task Scheduler](https://techcommunity.microsoft.com/blog/appsonazureblog/announcing-limited-early-access-of-the-durable-task-scheduler-for-azure-durable-/4286526) and the [Durable Task Framework for Go](https://github.com/microsoft/durabletask-go). With this SDK, you can define, schedule, and manage durable orchestrations using ordinary Python code. ⚠️ **This SDK is currently under active development and is not yet ready for production use.** ⚠️ -> Note that this project is **not** currently affiliated with the [Durable Functions](https://docs.microsoft.com/azure/azure-functions/durable/durable-functions-overview) project for Azure Functions. If you are looking for a Python SDK for Durable Functions, please see [this repo](https://github.com/Azure/azure-functions-durable-python). 
- +> Note that this SDK is **not** currently compatible with [Azure Durable Functions](https://docs.microsoft.com/azure/azure-functions/durable/durable-functions-overview). If you are looking for a Python SDK for Azure Durable Functions, please see [this repo](https://github.com/Azure/azure-functions-durable-python). ## Supported patterns diff --git a/durabletask-azuremanaged/__init__.py b/durabletask-azuremanaged/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/__init__.py b/durabletask-azuremanaged/durabletask/azuremanaged/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/client.py b/durabletask-azuremanaged/durabletask/azuremanaged/client.py new file mode 100644 index 0000000..f641eae --- /dev/null +++ b/durabletask-azuremanaged/durabletask/azuremanaged/client.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ + DTSDefaultClientInterceptorImpl +from durabletask.client import TaskHubGrpcClient + + +# Client class used for Durable Task Scheduler (DTS) +class DurableTaskSchedulerClient(TaskHubGrpcClient): + def __init__(self, *, + host_address: str, + taskhub: str, + token_credential: TokenCredential, + secure_channel: bool = True): + + if not taskhub: + raise ValueError("Taskhub value cannot be empty. 
Please provide a value for your taskhub") + + interceptors = [DTSDefaultClientInterceptorImpl(token_credential, taskhub)] + + # We pass in None for the metadata so we don't construct an additional interceptor in the parent class + # Since the parent class doesn't use anything metadata for anything else, we can set it as None + super().__init__( + host_address=host_address, + secure_channel=secure_channel, + metadata=None, + interceptors=interceptors) diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/access_token_manager.py b/durabletask-azuremanaged/durabletask/azuremanaged/internal/access_token_manager.py new file mode 100644 index 0000000..f0e7a42 --- /dev/null +++ b/durabletask-azuremanaged/durabletask/azuremanaged/internal/access_token_manager.py @@ -0,0 +1,49 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +from datetime import datetime, timedelta, timezone +from typing import Optional + +from azure.core.credentials import AccessToken, TokenCredential + +import durabletask.internal.shared as shared + + +# By default, when there's 10minutes left before the token expires, refresh the token +class AccessTokenManager: + + _token: Optional[AccessToken] + + def __init__(self, token_credential: Optional[TokenCredential], refresh_interval_seconds: int = 600): + self._scope = "https://durabletask.io/.default" + self._refresh_interval_seconds = refresh_interval_seconds + self._logger = shared.get_logger("token_manager") + + self._credential = token_credential + + if self._credential is not None: + self._token = self._credential.get_token(self._scope) + self.expiry_time = datetime.fromtimestamp(self._token.expires_on, tz=timezone.utc) + else: + self._token = None + self.expiry_time = None + + def get_access_token(self) -> Optional[AccessToken]: + if self._token is None or self.is_token_expired(): + self.refresh_token() + return self._token + + # Checks if the token is expired, or if it will expire in the next 
"refresh_interval_seconds" seconds. + # For example, if the token is created to have a lifespan of 2 hours, and the refresh buffer is set to 30 minutes, + # We will grab a new token when there're 30minutes left on the lifespan of the token + def is_token_expired(self) -> bool: + if self.expiry_time is None: + return True + return datetime.now(timezone.utc) >= (self.expiry_time - timedelta(seconds=self._refresh_interval_seconds)) + + def refresh_token(self): + if self._credential is not None: + self._token = self._credential.get_token(self._scope) + + # Convert UNIX timestamp to timezone-aware datetime + self.expiry_time = datetime.fromtimestamp(self._token.expires_on, tz=timezone.utc) + self._logger.debug(f"Token refreshed. Expires at: {self.expiry_time}") diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py new file mode 100644 index 0000000..a23cac9 --- /dev/null +++ b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +import grpc +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.access_token_manager import \ + AccessTokenManager +from durabletask.internal.grpc_interceptor import ( + DefaultClientInterceptorImpl, _ClientCallDetails) + + +class DTSDefaultClientInterceptorImpl (DefaultClientInterceptorImpl): + """The class implements a UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, + StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an + interceptor to add additional headers to all calls as needed.""" + + def __init__(self, token_credential: TokenCredential, taskhub_name: str): + self._metadata = [("taskhub", taskhub_name)] + super().__init__(self._metadata) + + if token_credential is not None: + self._token_credential = token_credential + self._token_manager = AccessTokenManager(token_credential=self._token_credential) + access_token = self._token_manager.get_access_token() + if access_token is not None: + self._metadata.append(("authorization", f"Bearer {access_token.token}")) + + def _intercept_call( + self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: + """Internal intercept_call implementation which adds metadata to grpc metadata in the RPC + call details.""" + # Refresh the auth token if it is present and needed + if self._metadata is not None: + for i, (key, _) in enumerate(self._metadata): + if key.lower() == "authorization": # Ensure case-insensitive comparison + new_token = self._token_manager.get_access_token() # Get the new token + if new_token is not None: + self._metadata[i] = ("authorization", f"Bearer {new_token.token}") # Update the token + + return super()._intercept_call(client_call_details) diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py new file mode 100644 index 0000000..d10c2f7 --- /dev/null +++ b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py @@ -0,0 
+1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ + DTSDefaultClientInterceptorImpl +from durabletask.worker import TaskHubGrpcWorker + + +# Worker class used for Durable Task Scheduler (DTS) +class DurableTaskSchedulerWorker(TaskHubGrpcWorker): + def __init__(self, *, + host_address: str, + taskhub: str, + token_credential: TokenCredential, + secure_channel: bool = True): + + if not taskhub: + raise ValueError("The taskhub value cannot be empty.") + + interceptors = [DTSDefaultClientInterceptorImpl(token_credential, taskhub)] + + # We pass in None for the metadata so we don't construct an additional interceptor in the parent class + # Since the parent class doesn't use anything metadata for anything else, we can set it as None + super().__init__( + host_address=host_address, + secure_channel=secure_channel, + metadata=None, + interceptors=interceptors) diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml new file mode 100644 index 0000000..ac6be6f --- /dev/null +++ b/durabletask-azuremanaged/pyproject.toml @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +# For more information on pyproject.toml, see https://peps.python.org/pep-0621/ + +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "durabletask.azuremanaged" +version = "0.1b1" +description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" +keywords = [ + "durable", + "task", + "workflow", + "azure" +] +classifiers = [ + "Development Status :: 3 - Alpha", + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", +] +requires-python = ">=3.9" +license = {file = "LICENSE"} +readme = "README.md" +dependencies = [ + "durabletask>=0.2.0", + "azure-identity>=1.19.0" +] + +[project.urls] +repository = "https://github.com/microsoft/durabletask-python" +changelog = "https://github.com/microsoft/durabletask-python/blob/main/CHANGELOG.md" + +[tool.setuptools.packages.find] +include = ["durabletask.azuremanaged", "durabletask.azuremanaged.*"] + +[tool.pytest.ini_options] +minversion = "6.0" diff --git a/durabletask/client.py b/durabletask/client.py index 31953ae..60e194f 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from datetime import datetime from enum import Enum -from typing import Any, Optional, TypeVar, Union +from typing import Any, Optional, Sequence, TypeVar, Union import grpc from google.protobuf import wrappers_pb2 @@ -16,6 +16,7 @@ import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared from durabletask import task +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl TInput = TypeVar('TInput') TOutput = TypeVar('TOutput') @@ -96,8 +97,25 @@ def __init__(self, *, metadata: Optional[list[tuple[str, str]]] = None, log_handler: Optional[logging.Handler] = None, log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False): - channel = 
shared.get_grpc_channel(host_address, metadata, secure_channel=secure_channel) + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None): + + # If the caller provided metadata, we need to create a new interceptor for it and + # add it to the list of interceptors. + if interceptors is not None: + interceptors = list(interceptors) + if metadata is not None: + interceptors.append(DefaultClientInterceptorImpl(metadata)) + elif metadata is not None: + interceptors = [DefaultClientInterceptorImpl(metadata)] + else: + interceptors = None + + channel = shared.get_grpc_channel( + host_address=host_address, + secure_channel=secure_channel, + interceptors=interceptors + ) self._stub = stubs.TaskHubSidecarServiceStub(channel) self._logger = shared.get_logger("client", log_handler, log_formatter) @@ -116,7 +134,7 @@ def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInpu scheduledStartTimestamp=helpers.new_timestamp(start_at) if start_at else None, version=wrappers_pb2.StringValue(value=""), orchestrationIdReusePolicy=reuse_id_policy, - ) + ) self._logger.info(f"Starting new '{name}' instance with ID = '{req.instanceId}'.") res: pb.CreateInstanceResponse = self._stub.StartInstance(req) diff --git a/durabletask/internal/grpc_interceptor.py b/durabletask/internal/grpc_interceptor.py index 738fca9..69db3c5 100644 --- a/durabletask/internal/grpc_interceptor.py +++ b/durabletask/internal/grpc_interceptor.py @@ -19,10 +19,10 @@ class _ClientCallDetails( class DefaultClientInterceptorImpl ( - grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor, - grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor): + grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor): """The class implements a UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, - StreamUnaryClientInterceptor and 
StreamStreamClientInterceptor from grpc to add an + StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an interceptor to add additional headers to all calls as needed.""" def __init__(self, metadata: list[tuple[str, str]]): @@ -30,17 +30,17 @@ def __init__(self, metadata: list[tuple[str, str]]): self._metadata = metadata def _intercept_call( - self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: + self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: """Internal intercept_call implementation which adds metadata to grpc metadata in the RPC call details.""" if self._metadata is None: return client_call_details - + if client_call_details.metadata is not None: metadata = list(client_call_details.metadata) else: metadata = [] - + metadata.extend(self._metadata) client_call_details = _ClientCallDetails( client_call_details.method, client_call_details.timeout, metadata, diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index c4f3aa4..1872ad4 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -5,11 +5,16 @@ import json import logging from types import SimpleNamespace -from typing import Any, Optional +from typing import Any, Optional, Sequence, Union import grpc -from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl +ClientInterceptor = Union[ + grpc.UnaryUnaryClientInterceptor, + grpc.UnaryStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, + grpc.StreamStreamClientInterceptor +] # Field name used to indicate that an object was automatically serialized # and should be deserialized as a SimpleNamespace @@ -25,8 +30,9 @@ def get_default_host_address() -> str: def get_grpc_channel( host_address: Optional[str], - metadata: Optional[list[tuple[str, str]]], - secure_channel: bool = False) -> grpc.Channel: + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None) -> grpc.Channel: + 
if host_address is None: host_address = get_default_host_address() @@ -44,16 +50,18 @@ def get_grpc_channel( host_address = host_address[len(protocol):] break + # Create the base channel if secure_channel: channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) else: channel = grpc.insecure_channel(host_address) - if metadata is not None and len(metadata) > 0: - interceptors = [DefaultClientInterceptorImpl(metadata)] + # Apply interceptors ONLY if they exist + if interceptors: channel = grpc.intercept_channel(channel, *interceptors) return channel + def get_logger( name_suffix: str, log_handler: Optional[logging.Handler] = None, @@ -98,7 +106,7 @@ def default(self, obj): if dataclasses.is_dataclass(obj): # Dataclasses are not serializable by default, so we convert them to a dict and mark them for # automatic deserialization by the receiver - d = dataclasses.asdict(obj) # type: ignore + d = dataclasses.asdict(obj) # type: ignore d[AUTO_SERIALIZED] = True return d elif isinstance(obj, SimpleNamespace): diff --git a/durabletask/task.py b/durabletask/task.py index a40602b..9e8a08a 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -277,6 +277,7 @@ def get_tasks(self) -> list[Task]: def on_child_completed(self, task: Task[T]): pass + class WhenAllTask(CompositeTask[list[T]]): """A task that completes when all of its child tasks complete.""" @@ -333,7 +334,7 @@ class RetryableTask(CompletableTask[T]): """A task that can be retried according to a retry policy.""" def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, - start_time:datetime, is_sub_orch: bool) -> None: + start_time: datetime, is_sub_orch: bool) -> None: super().__init__() self._action = action self._retry_policy = retry_policy @@ -343,7 +344,7 @@ def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, def increment_attempt_count(self) -> None: self._attempt_count += 1 - + def compute_next_delay(self) -> Optional[timedelta]: if 
self._attempt_count >= self._retry_policy.max_number_of_attempts: return None @@ -351,7 +352,7 @@ def compute_next_delay(self) -> Optional[timedelta]: retry_expiration: datetime = datetime.max if self._retry_policy.retry_timeout is not None and self._retry_policy.retry_timeout != datetime.max: retry_expiration = self._start_time + self._retry_policy.retry_timeout - + if self._retry_policy.backoff_coefficient is None: backoff_coefficient = 1.0 else: diff --git a/durabletask/worker.py b/durabletask/worker.py index 75e2e37..2c31e52 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -9,7 +9,7 @@ from typing import Any, Generator, Optional, Sequence, TypeVar, Union import grpc -from google.protobuf import empty_pb2, wrappers_pb2 +from google.protobuf import empty_pb2 import durabletask.internal.helpers as ph import durabletask.internal.helpers as pbh @@ -17,6 +17,7 @@ import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared from durabletask import task +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl TInput = TypeVar('TInput') TOutput = TypeVar('TOutput') @@ -82,21 +83,32 @@ class ActivityNotRegisteredError(ValueError): class TaskHubGrpcWorker: _response_stream: Optional[grpc.Future] = None + _interceptors: Optional[list[shared.ClientInterceptor]] = None def __init__(self, *, host_address: Optional[str] = None, metadata: Optional[list[tuple[str, str]]] = None, log_handler=None, log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False): + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None): self._registry = _Registry() self._host_address = host_address if host_address else shared.get_default_host_address() - self._metadata = metadata self._logger = shared.get_logger("worker", log_handler, log_formatter) self._shutdown = Event() self._is_running = False self._secure_channel = secure_channel + # 
Determine the interceptors to use + if interceptors is not None: + self._interceptors = list(interceptors) + if metadata: + self._interceptors.append(DefaultClientInterceptorImpl(metadata)) + elif metadata: + self._interceptors = [DefaultClientInterceptorImpl(metadata)] + else: + self._interceptors = None + def __enter__(self): return self @@ -117,7 +129,7 @@ def add_activity(self, fn: task.Activity) -> str: def start(self): """Starts the worker on a background thread and begins listening for work items.""" - channel = shared.get_grpc_channel(self._host_address, self._metadata, self._secure_channel) + channel = shared.get_grpc_channel(self._host_address, self._secure_channel, self._interceptors) stub = stubs.TaskHubSidecarServiceStub(channel) if self._is_running: @@ -143,9 +155,11 @@ def run_loop(): request_type = work_item.WhichOneof('request') self._logger.debug(f'Received "{request_type}" work item') if work_item.HasField('orchestratorRequest'): - executor.submit(self._execute_orchestrator, work_item.orchestratorRequest, stub) + executor.submit(self._execute_orchestrator, work_item.orchestratorRequest, stub, work_item.completionToken) elif work_item.HasField('activityRequest'): - executor.submit(self._execute_activity, work_item.activityRequest, stub) + executor.submit(self._execute_activity, work_item.activityRequest, stub, work_item.completionToken) + elif work_item.HasField('healthPing'): + pass # no-op else: self._logger.warning(f'Unexpected work item type: {request_type}') @@ -184,26 +198,27 @@ def stop(self): self._logger.info("Worker shutdown completed") self._is_running = False - def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHubSidecarServiceStub): + def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken): try: executor = _OrchestrationExecutor(self._registry, self._logger) result = executor.execute(req.instanceId, req.pastEvents, req.newEvents) res = 
pb.OrchestratorResponse( instanceId=req.instanceId, actions=result.actions, - customStatus=pbh.get_string_value(result.encoded_custom_status)) + customStatus=pbh.get_string_value(result.encoded_custom_status), + completionToken=completionToken) except Exception as ex: self._logger.exception(f"An error occurred while trying to execute instance '{req.instanceId}': {ex}") failure_details = pbh.new_failure_details(ex) actions = [pbh.new_complete_orchestration_action(-1, pb.ORCHESTRATION_STATUS_FAILED, "", failure_details)] - res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=actions) + res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=actions, completionToken=completionToken) try: stub.CompleteOrchestratorTask(res) except Exception as ex: self._logger.exception(f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}") - def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarServiceStub): + def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken): instance_id = req.orchestrationInstance.instanceId try: executor = _ActivityExecutor(self._registry, self._logger) @@ -211,12 +226,14 @@ def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarS res = pb.ActivityResponse( instanceId=instance_id, taskId=req.taskId, - result=pbh.get_string_value(result)) + result=pbh.get_string_value(result), + completionToken=completionToken) except Exception as ex: res = pb.ActivityResponse( instanceId=instance_id, taskId=req.taskId, - failureDetails=pbh.new_failure_details(ex)) + failureDetails=pbh.new_failure_details(ex), + completionToken=completionToken) try: stub.CompleteActivityTask(res) @@ -471,6 +488,7 @@ def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: self.actions = actions self.encoded_custom_status = encoded_custom_status + class _OrchestrationExecutor: _generator: Optional[task.Orchestrator] = None 
diff --git a/examples/README.md b/examples/README.md index ec9088f..7cfbc7a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -8,7 +8,7 @@ All the examples assume that you have a Durable Task-compatible sidecar running 1. Install the latest version of the [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/), which contains and exposes an embedded version of the Durable Task engine. The setup process (which requires Docker) will configure the workflow engine to store state in a local Redis container. -1. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. +2. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. ## Running the examples diff --git a/examples/dts/README.md b/examples/dts/README.md new file mode 100644 index 0000000..9b4a3fd --- /dev/null +++ b/examples/dts/README.md @@ -0,0 +1,55 @@ +# Examples + +This directory contains examples of how to author durable orchestrations using the Durable Task Python SDK in conjunction with the Durable Task Scheduler (DTS). Please note that the installation instructions provided below will use the version of DTS directly from the your branch rather than installing through PyPI. + +## Prerequisites + +All the examples assume that you have a Durable Task Scheduler taskhub created. + +The simplest way to create a taskhub is by using the az cli commands: + +1. Create a scheduler: + az durabletask scheduler create --resource-group --name --location --ip-allowlist "[0.0.0.0/0]" --sku-capacity 1 --sku-name "Dedicated" --tags "{}" + +1. Create your taskhub + + ```bash + az durabletask taskhub create --resource-group --scheduler-name --name + ``` + +1. Retrieve the endpoint for the scheduler. 
This can be done by locating the taskhub in the portal. + +1. Set the appropriate environment variables for the TASKHUB and ENDPOINT + + ```bash + export TASKHUB= + export ENDPOINT= + ``` + +1. Since the samples rely on azure identity, ensure the package is installed and up-to-date + + ```bash + python3 -m pip install azure-identity + ``` + +1. Install the correct packages from the top level of this repository, i.e. durabletask-python/ + + ```bash + python3 -m pip install . + ``` + +1. Install the DTS specific packages from the durabletask-python/durabletask-azuremanaged directory + + ```bash + pip3 install -e . + ``` + +1. Grant yourself the `Durable Task Data Contributor` role over your scheduler + +## Running the examples + +Now, you can simply execute any of the examples in this directory using `python3`: + +```sh +python3 dts_activity_sequence.py +``` diff --git a/examples/dts/dts_activity_sequence.py b/examples/dts/dts_activity_sequence.py new file mode 100644 index 0000000..2ff3c22 --- /dev/null +++ b/examples/dts/dts_activity_sequence.py @@ -0,0 +1,71 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that calls an activity function in a sequence and prints the outputs.""" +import os + +from azure.identity import DefaultAzureCredential + +from durabletask import client, task +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + + +def hello(ctx: task.ActivityContext, name: str) -> str: + """Activity function that returns a greeting""" + return f'Hello {name}!' 
+ + +def sequence(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'hello' activity function in a sequence""" + # call "hello" activity function in a sequence + result1 = yield ctx.call_activity(hello, input='Tokyo') + result2 = yield ctx.call_activity(hello, input='Seattle') + result3 = yield ctx.call_activity(hello, input='London') + + # return an array of results + return [result1, result2, result3] + + +# Read the environment variable +taskhub_name = os.getenv("TASKHUB") + +# Check if the variable exists +if taskhub_name: + print(f"The value of TASKHUB is: {taskhub_name}") +else: + print("TASKHUB is not set. Please set the TASKHUB environment variable to the name of the taskhub you wish to use") + print("If you are using windows powershell, run the following: $env:TASKHUB=\"\"") + print("If you are using bash, run the following: export TASKHUB=\"\"") + exit() + +# Read the environment variable +endpoint = os.getenv("ENDPOINT") + +# Check if the variable exists +if endpoint: + print(f"The value of ENDPOINT is: {endpoint}") +else: + print("ENDPOINT is not set. 
Please set the ENDPOINT environment variable to the endpoint of the scheduler") + print("If you are using windows powershell, run the following: $env:ENDPOINT=\"\"") + print("If you are using bash, run the following: export ENDPOINT=\"\"") + exit() + +# Note that any azure-identity credential type and configuration can be used here as DTS supports various credential +# types such as Managed Identities +credential = DefaultAzureCredential() + +# configure and start the worker +with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=credential) as w: + w.add_orchestrator(sequence) + w.add_activity(hello) + w.start() + + # Construct the client and run the orchestrations + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=credential) + instance_id = c.schedule_new_orchestration(sequence) + state = c.wait_for_orchestration_completion(instance_id, timeout=60) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! 
Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') diff --git a/examples/dts/dts_fanout_fanin.py b/examples/dts/dts_fanout_fanin.py new file mode 100644 index 0000000..8ab68df --- /dev/null +++ b/examples/dts/dts_fanout_fanin.py @@ -0,0 +1,96 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that a dynamic number activity functions in parallel, waits for them all +to complete, and prints an aggregate summary of the outputs.""" +import os +import random +import time + +from azure.identity import DefaultAzureCredential + +from durabletask import client, task +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + + +def get_work_items(ctx: task.ActivityContext, _) -> list[str]: + """Activity function that returns a list of work items""" + # return a random number of work items + count = random.randint(2, 10) + print(f'generating {count} work items...') + return [f'work item {i}' for i in range(count)] + + +def process_work_item(ctx: task.ActivityContext, item: str) -> int: + """Activity function that returns a result for a given work item""" + print(f'processing work item: {item}') + + # simulate some work that takes a variable amount of time + time.sleep(random.random() * 5) + + # return a result for the given work item, which is also a random number in this case + return random.randint(0, 10) + + +def orchestrator(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'get_work_items' and 'process_work_item' + activity functions in parallel, waits for them all to complete, and prints + an aggregate summary of the outputs""" + + work_items: list[str] = yield ctx.call_activity(get_work_items) + + # execute the work-items in parallel and wait for them all to return + tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items] + results: list[int] = 
yield task.when_all(tasks) + + # return an aggregate summary of the results + return { + 'work_items': work_items, + 'results': results, + 'total': sum(results), + } + + +# Read the environment variable +taskhub_name = os.getenv("TASKHUB") + +# Check if the variable exists +if taskhub_name: + print(f"The value of TASKHUB is: {taskhub_name}") +else: + print("TASKHUB is not set. Please set the TASKHUB environment variable to the name of the taskhub you wish to use") + print("If you are using windows powershell, run the following: $env:TASKHUB=\"\"") + print("If you are using bash, run the following: export TASKHUB=\"\"") + exit() + +# Read the environment variable +endpoint = os.getenv("ENDPOINT") + +# Check if the variable exists +if endpoint: + print(f"The value of ENDPOINT is: {endpoint}") +else: + print("ENDPOINT is not set. Please set the ENDPOINT environment variable to the endpoint of the scheduler") + print("If you are using windows powershell, run the following: $env:ENDPOINT=\"\"") + print("If you are using bash, run the following: export ENDPOINT=\"\"") + exit() + +credential = DefaultAzureCredential() + +# configure and start the worker +with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=credential) as w: + w.add_orchestrator(orchestrator) + w.add_activity(process_work_item) + w.add_activity(get_work_items) + w.start() + + # create a client, start an orchestration, and wait for it to finish + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=credential) + instance_id = c.schedule_new_orchestration(orchestrator) + state = c.wait_for_orchestration_completion(instance_id, timeout=30) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! 
Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') + exit() diff --git a/requirements.txt b/requirements.txt index a31419b..0da7d46 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,3 +3,5 @@ grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newe protobuf pytest pytest-cov +azure-core +azure-identity \ No newline at end of file diff --git a/tests/test_client.py b/tests/test_client.py index caacf65..64bbec8 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,36 +1,36 @@ from unittest.mock import patch, ANY -from durabletask.internal.shared import (DefaultClientInterceptorImpl, - get_default_host_address, +from durabletask.internal.shared import (get_default_host_address, get_grpc_channel) +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl HOST_ADDRESS = 'localhost:50051' METADATA = [('key1', 'value1'), ('key2', 'value2')] - +INTERCEPTORS = [DefaultClientInterceptorImpl(METADATA)] def test_get_grpc_channel_insecure(): with patch('grpc.insecure_channel') as mock_channel: - get_grpc_channel(HOST_ADDRESS, METADATA, False) + get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS) def test_get_grpc_channel_secure(): with patch('grpc.secure_channel') as mock_channel, patch( 'grpc.ssl_channel_credentials') as mock_credentials: - get_grpc_channel(HOST_ADDRESS, METADATA, True) + get_grpc_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS, mock_credentials.return_value) def test_get_grpc_channel_default_host_address(): with patch('grpc.insecure_channel') as mock_channel: - get_grpc_channel(None, METADATA, False) + get_grpc_channel(None, False, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(get_default_host_address()) def test_get_grpc_channel_with_metadata(): with patch('grpc.insecure_channel') as mock_channel, 
patch( 'grpc.intercept_channel') as mock_intercept_channel: - get_grpc_channel(HOST_ADDRESS, METADATA, False) + get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS) mock_intercept_channel.assert_called_once() @@ -48,41 +48,41 @@ def test_grpc_channel_with_host_name_protocol_stripping(): host_name = "myserver.com:1234" prefix = "grpc://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "http://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "HTTP://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "GRPC://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "grpcs://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) prefix = "https://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) prefix = "HTTPS://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) prefix = "GRPCS://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, 
interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) prefix = "" - get_grpc_channel(prefix + host_name, METADATA, True) + get_grpc_channel(prefix + host_name, True, interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) \ No newline at end of file From 6d3ad8f06af017ee9286b3c2b35e80ac164f65bd Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Mon, 10 Mar 2025 13:56:24 -0700 Subject: [PATCH 07/20] Update pr-validation.yml Signed-off-by: Albert Callarisa --- .github/workflows/pr-validation.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 70ff470..4b909cf 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -8,6 +8,7 @@ on: branches: [ "main" ] pull_request: branches: [ "main" ] + merge_group: jobs: build: From 75f573bc67244856b2990d4c0916d9a868b41708 Mon Sep 17 00:00:00 2001 From: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> Date: Mon, 10 Mar 2025 14:56:59 -0600 Subject: [PATCH 08/20] Making token credential optional (#45) Signed-off-by: Ryan Lettieri Signed-off-by: Albert Callarisa --- durabletask-azuremanaged/durabletask/azuremanaged/client.py | 3 ++- .../azuremanaged/internal/durabletask_grpc_interceptor.py | 4 +++- durabletask-azuremanaged/durabletask/azuremanaged/worker.py | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/client.py b/durabletask-azuremanaged/durabletask/azuremanaged/client.py index f641eae..1d8cecd 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/client.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/client.py @@ -2,6 +2,7 @@ # Licensed under the MIT License. 
from azure.core.credentials import TokenCredential +from typing import Optional from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ DTSDefaultClientInterceptorImpl @@ -13,7 +14,7 @@ class DurableTaskSchedulerClient(TaskHubGrpcClient): def __init__(self, *, host_address: str, taskhub: str, - token_credential: TokenCredential, + token_credential: Optional[TokenCredential], secure_channel: bool = True): if not taskhub: diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py index a23cac9..077905e 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py @@ -2,6 +2,8 @@ # Licensed under the MIT License. import grpc +from typing import Optional + from azure.core.credentials import TokenCredential from durabletask.azuremanaged.internal.access_token_manager import \ @@ -15,7 +17,7 @@ class DTSDefaultClientInterceptorImpl (DefaultClientInterceptorImpl): StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an interceptor to add additional headers to all calls as needed.""" - def __init__(self, token_credential: TokenCredential, taskhub_name: str): + def __init__(self, token_credential: Optional[TokenCredential], taskhub_name: str): self._metadata = [("taskhub", taskhub_name)] super().__init__(self._metadata) diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py index d10c2f7..8bdff3d 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py @@ -2,6 +2,7 @@ # Licensed under the MIT License. 
from azure.core.credentials import TokenCredential +from typing import Optional from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ DTSDefaultClientInterceptorImpl @@ -13,7 +14,7 @@ class DurableTaskSchedulerWorker(TaskHubGrpcWorker): def __init__(self, *, host_address: str, taskhub: str, - token_credential: TokenCredential, + token_credential: Optional[TokenCredential], secure_channel: bool = True): if not taskhub: From aae026732bfb8bb54928d1c5a9052d50c66c7e8e Mon Sep 17 00:00:00 2001 From: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> Date: Fri, 21 Mar 2025 12:22:08 -0600 Subject: [PATCH 09/20] Creation of pipeline to publish dts python package to pypi (#43) * Creating of pipeline to publish dts python package to pypi Signed-off-by: Ryan Lettieri * Upgrading version of durabletask-azuremanaged from 0.1b1 to 0.1 Signed-off-by: Ryan Lettieri * Updating versioning on packages Signed-off-by: Ryan Lettieri * Incrementing version to allign with pypi Signed-off-by: Ryan Lettieri * Adressing majority of first round of feedback Signed-off-by: Ryan Lettieri * Updating pipeline to have linting Signed-off-by: Ryan Lettieri * Updating versions in pyproject.toml Signed-off-by: Ryan Lettieri * Updating working dirs in yml Signed-off-by: Ryan Lettieri * Adding requirements.txt Signed-off-by: Ryan Lettieri * Moving durabletask tests into specific dir and more Signed-off-by: Ryan Lettieri * Fixing more paths Signed-off-by: Ryan Lettieri * ATtemptign to ignore durabletask-azuremanaged folder Signed-off-by: Ryan Lettieri * installing dts dependencies Signed-off-by: Ryan Lettieri * Changing path for requirements.txt Signed-off-by: Ryan Lettieri * Moving init.py Signed-off-by: Ryan Lettieri * Updating readme and some tests Signed-off-by: Ryan Lettieri * Running all dts tests in publish pipeline Signed-off-by: Ryan Lettieri * Removing PYTHONPATH and installing regular deps Signed-off-by: Ryan Lettieri * Adding timeout to dts orchestration e2e 
test Signed-off-by: Ryan Lettieri * Removing suspend and continue as new tests from dts Signed-off-by: Ryan Lettieri * Removing raise event timeout tests Signed-off-by: Ryan Lettieri * Only runnign publish on tag push Signed-off-by: Ryan Lettieri * Changing dts action to run on tag creation Signed-off-by: Ryan Lettieri * Updating tag name Signed-off-by: Ryan Lettieri * Adressing review feedback Signed-off-by: Ryan Lettieri * Fixing run requirements in actions and adding exit-zero Signed-off-by: Ryan Lettieri * Update .github/workflows/publish-dts-sdk.yml --------- Signed-off-by: Ryan Lettieri Co-authored-by: Bernd Verst Signed-off-by: Albert Callarisa --- .github/workflows/pr-validation.yml | 109 ++-- .github/workflows/publish-dts-sdk.yml | 110 ++++ durabletask-azuremanaged/pyproject.toml | 2 +- examples/dts/README.md | 30 +- examples/dts/requirements.txt | 6 + pyproject.toml | 2 +- tests/durabletask-azuremanaged/__init__.py | 0 .../test_dts_activity_sequence.py | 69 +++ .../test_dts_orchestration_e2e.py | 503 ++++++++++++++++++ .../test_activity_executor.py | 0 tests/{ => durabletask}/test_client.py | 0 .../test_orchestration_e2e.py | 0 .../test_orchestration_executor.py | 0 13 files changed, 777 insertions(+), 54 deletions(-) create mode 100644 .github/workflows/publish-dts-sdk.yml create mode 100644 examples/dts/requirements.txt create mode 100644 tests/durabletask-azuremanaged/__init__.py create mode 100644 tests/durabletask-azuremanaged/test_dts_activity_sequence.py create mode 100644 tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py rename tests/{ => durabletask}/test_activity_executor.py (100%) rename tests/{ => durabletask}/test_client.py (100%) rename tests/{ => durabletask}/test_orchestration_e2e.py (100%) rename tests/{ => durabletask}/test_orchestration_executor.py (100%) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 4b909cf..dddcc53 100644 --- a/.github/workflows/pr-validation.yml +++ 
b/.github/workflows/pr-validation.yml @@ -1,51 +1,58 @@ -# This workflow will install Python dependencies, run tests and lint with a variety of Python versions -# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python - -name: Build Validation - -on: - push: - branches: [ "main" ] - pull_request: - branches: [ "main" ] - merge_group: - -jobs: - build: - - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] - - steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install flake8 pytest - pip install -r requirements.txt - - name: Lint with flake8 - run: | - flake8 . --count --show-source --statistics --exit-zero - - name: Pytest unit tests - run: | - pytest -m "not e2e" --verbose - - # Sidecar for running e2e tests requires Go SDK - - name: Install Go SDK - uses: actions/setup-go@v5 - with: - go-version: 'stable' - - # Install and run the durabletask-go sidecar for running e2e tests - - name: Pytest e2e tests - run: | - go install github.com/microsoft/durabletask-go@main - durabletask-go --port 4001 & - pytest -m "e2e" --verbose +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: Build Validation + +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + merge_group: + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + 
with: + python-version: ${{ matrix.python-version }} + - name: Install durabletask dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest + pip install -r requirements.txt + - name: Install durabletask-azuremanaged dependencies + working-directory: examples/dts + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + - name: Lint with flake8 + run: | + flake8 . --count --show-source --statistics --exit-zero + - name: Pytest unit tests + working-directory: tests/durabletask + run: | + pytest -m "not e2e and not dts" --verbose + + # Sidecar for running e2e tests requires Go SDK + - name: Install Go SDK + uses: actions/setup-go@v5 + with: + go-version: 'stable' + + # Install and run the durabletask-go sidecar for running e2e tests + - name: Pytest e2e tests + working-directory: tests/durabletask + run: | + go install github.com/microsoft/durabletask-go@main + durabletask-go --port 4001 & + pytest -m "e2e and not dts" --verbose diff --git a/.github/workflows/publish-dts-sdk.yml b/.github/workflows/publish-dts-sdk.yml new file mode 100644 index 0000000..de773f2 --- /dev/null +++ b/.github/workflows/publish-dts-sdk.yml @@ -0,0 +1,110 @@ +name: Publish Durable Task Scheduler to PyPI + +on: + push: + branches: + - "main" + tags: + - "azuremanaged-v*" # Only run for tags starting with "azuremanaged-v" + pull_request: + branches: + - "main" + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Install dependencies + working-directory: durabletask-azuremanaged + run: | + python -m pip install --upgrade pip + pip install setuptools wheel tox + pip install flake8 + - name: Run flake8 Linter + working-directory: durabletask-azuremanaged + run: flake8 . 
+ + run-docker-tests: + env: + EMULATOR_VERSION: "v0.0.5" # Define the variable + needs: lint + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Pull Docker image + run: docker pull mcr.microsoft.com/dts/dts-emulator:$EMULATOR_VERSION + + - name: Run Docker container + run: | + docker run --name dtsemulator -d -p 8080:8080 mcr.microsoft.com/dts/dts-emulator:$EMULATOR_VERSION + + - name: Wait for container to be ready + run: sleep 10 # Adjust if your service needs more time to start + + - name: Set environment variables + run: | + echo "TASKHUB=default" >> $GITHUB_ENV + echo "ENDPOINT=http://localhost:8080" >> $GITHUB_ENV + + - name: Install durabletask dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest + pip install -r requirements.txt + + - name: Install durabletask-azuremanaged dependencies + working-directory: examples/dts + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + + - name: Run the tests + working-directory: tests/durabletask-azuremanaged + run: | + pytest -m "dts" --verbose + + publish: + if: startsWith(github.ref, 'refs/tags/azuremanaged-v') # Only run if a matching tag is pushed + needs: run-docker-tests + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Extract version from tag + run: echo "VERSION=${GITHUB_REF#refs/tags/azuremanaged-v}" >> $GITHUB_ENV # Extract version from the tag + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" # Adjust Python version as needed + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build twine + + - name: Build package from directory durabletask-azuremanaged + working-directory: durabletask-azuremanaged + run: | + python -m build + + - name: Check package + working-directory: durabletask-azuremanaged + run: | + twine check dist/* + + - name: Publish package to PyPI + 
env: + TWINE_USERNAME: __token__ + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN_AZUREMANAGED }} # Store your PyPI API token in GitHub Secrets + working-directory: durabletask-azuremanaged + run: | + twine upload dist/* \ No newline at end of file diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml index ac6be6f..c4c8a96 100644 --- a/durabletask-azuremanaged/pyproject.toml +++ b/durabletask-azuremanaged/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask.azuremanaged" -version = "0.1b1" +version = "0.1.2" description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" keywords = [ "durable", diff --git a/examples/dts/README.md b/examples/dts/README.md index 9b4a3fd..8df2b75 100644 --- a/examples/dts/README.md +++ b/examples/dts/README.md @@ -4,8 +4,13 @@ This directory contains examples of how to author durable orchestrations using t ## Prerequisites -All the examples assume that you have a Durable Task Scheduler taskhub created. +There are 2 separate ways to run an example: +1. Using the emulator. +2. Using a real scheduler and taskhub. +All the examples by defualt assume that you have a Durable Task Scheduler taskhub created. + +## Running with a scheduler and taskhub resource The simplest way to create a taskhub is by using the az cli commands: 1. Create a scheduler: @@ -46,6 +51,29 @@ The simplest way to create a taskhub is by using the az cli commands: 1. Grant yourself the `Durable Task Data Contributor` role over your scheduler +## Running with the emulator +The emulator is a simulation of a scheduler and taskhub. It is the 'backend' of the durabletask-azuremanaged system packaged up into an easy to use docker container. For these steps, it is assumed that you are using port 8080. + +In order to use the emulator for the examples, perform the following steps: +1. Install docker if it is not already installed. + +2. 
Pull down the docker image for the emulator: + `docker pull mcr.microsoft.com/dts/dts-emulator:v0.0.4` + +3. Run the emulator and wait a few seconds for the container to be ready: +`docker run --name dtsemulator -d -p 8080:8080 mcr.microsoft.com/dts/dts-emulator:v0.0.4` + +4. Set the environment variables that are referenced and used in the examples: + 1. If you are using windows powershell: + `$env:TASKHUB="default"` + `$env:ENDPOINT="http://localhost:8080"` + 2. If you are using bash: + `export TASKHUB=default` + `export ENDPOINT=http://localhost:8080` + +5. Finally, edit the examples to change the `token_credential` input of both the `DurableTaskSchedulerWorker` and `DurableTaskSchedulerClient` to a value of `None` + + ## Running the examples Now, you can simply execute any of the examples in this directory using `python3`: diff --git a/examples/dts/requirements.txt b/examples/dts/requirements.txt new file mode 100644 index 0000000..b12d5a2 --- /dev/null +++ b/examples/dts/requirements.txt @@ -0,0 +1,6 @@ +autopep8 +grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible +protobuf +azure-identity +durabletask-azuremanaged +durabletask \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 577824b..d3d9429 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask" -version = "0.2b1" +version = "0.2.0" description = "A Durable Task Client SDK for Python" keywords = [ "durable", diff --git a/tests/durabletask-azuremanaged/__init__.py b/tests/durabletask-azuremanaged/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/durabletask-azuremanaged/test_dts_activity_sequence.py b/tests/durabletask-azuremanaged/test_dts_activity_sequence.py new file mode 100644 index 0000000..c875e49 --- /dev/null +++ b/tests/durabletask-azuremanaged/test_dts_activity_sequence.py @@ -0,0 +1,69 @@ 
+"""End-to-end sample that demonstrates how to configure an orchestrator +that calls an activity function in a sequence and prints the outputs.""" +import os + +from durabletask import client, task +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + +import pytest + + +pytestmark = pytest.mark.dts + +def hello(ctx: task.ActivityContext, name: str) -> str: + """Activity function that returns a greeting""" + return f'Hello {name}!' + + +def sequence(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'hello' activity function in a sequence""" + # call "hello" activity function in a sequence + result1 = yield ctx.call_activity(hello, input='Tokyo') + result2 = yield ctx.call_activity(hello, input='Seattle') + result3 = yield ctx.call_activity(hello, input='London') + + # return an array of results + return [result1, result2, result3] + + +# Read the environment variable +taskhub_name = os.getenv("TASKHUB") + +# Check if the variable exists +if taskhub_name: + print(f"The value of TASKHUB is: {taskhub_name}") +else: + print("TASKHUB is not set. Please set the TASKHUB environment variable to the name of the taskhub you wish to use") + print("If you are using windows powershell, run the following: $env:TASKHUB=\"\"") + print("If you are using bash, run the following: export TASKHUB=\"\"") + exit() + +# Read the environment variable +endpoint = os.getenv("ENDPOINT") + +# Check if the variable exists +if endpoint: + print(f"The value of ENDPOINT is: {endpoint}") +else: + print("ENDPOINT is not set. 
Please set the ENDPOINT environment variable to the endpoint of the scheduler") + print("If you are using windows powershell, run the following: $env:ENDPOINT=\"\"") + print("If you are using bash, run the following: export ENDPOINT=\"\"") + exit() + +# configure and start the worker +with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(sequence) + w.add_activity(hello) + w.start() + + # Construct the client and run the orchestrations + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + instance_id = c.schedule_new_orchestration(sequence) + state = c.wait_for_orchestration_completion(instance_id, timeout=60) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') diff --git a/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py b/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py new file mode 100644 index 0000000..f10e605 --- /dev/null +++ b/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py @@ -0,0 +1,503 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import json +import threading +import time +import os +from datetime import timedelta + +import pytest + +from durabletask import client, task +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + +# NOTE: These tests assume a sidecar process is running. 
Example command: +# docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator +pytestmark = pytest.mark.dts + +# Read the environment variables +taskhub_name = os.getenv("TASKHUB", "default") +endpoint = os.getenv("ENDPOINT", "http://localhost:8080") + +def test_empty_orchestration(): + + invoked = False + + def empty_orchestrator(ctx: task.OrchestrationContext, _): + nonlocal invoked # don't do this in a real app! + invoked = True + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(empty_orchestrator) + w.start() + + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = c.schedule_new_orchestration(empty_orchestrator) + state = c.wait_for_orchestration_completion(id, timeout=30) + + assert invoked + assert state is not None + assert state.name == task.get_name(empty_orchestrator) + assert state.instance_id == id + assert state.failure_details is None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_input is None + assert state.serialized_output is None + assert state.serialized_custom_status is None + + +def test_activity_sequence(): + + def plus_one(_: task.ActivityContext, input: int) -> int: + return input + 1 + + def sequence(ctx: task.OrchestrationContext, start_val: int): + numbers = [start_val] + current = start_val + for _ in range(10): + current = yield ctx.call_activity(plus_one, input=current) + numbers.append(current) + return numbers + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + 
w.add_orchestrator(sequence) + w.add_activity(plus_one) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(sequence, input=1) + state = task_hub_client.wait_for_orchestration_completion( + id, timeout=30) + + assert state is not None + assert state.name == task.get_name(sequence) + assert state.instance_id == id + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert state.serialized_input == json.dumps(1) + assert state.serialized_output == json.dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + assert state.serialized_custom_status is None + + +def test_activity_error_handling(): + + def throw(_: task.ActivityContext, input: int) -> int: + raise RuntimeError("Kah-BOOOOM!!!") + + compensation_counter = 0 + + def increment_counter(ctx, _): + nonlocal compensation_counter + compensation_counter += 1 + + def orchestrator(ctx: task.OrchestrationContext, input: int): + error_msg = "" + try: + yield ctx.call_activity(throw, input=input) + except task.TaskFailedError as e: + error_msg = e.details.message + + # compensating actions + yield ctx.call_activity(increment_counter) + yield ctx.call_activity(increment_counter) + + return error_msg + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(orchestrator) + w.add_activity(throw) + w.add_activity(increment_counter) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + 
assert state.name == task.get_name(orchestrator) + assert state.instance_id == id + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps("Kah-BOOOOM!!!") + assert state.failure_details is None + assert state.serialized_custom_status is None + assert compensation_counter == 2 + + +def test_sub_orchestration_fan_out(): + threadLock = threading.Lock() + activity_counter = 0 + + def increment(ctx, _): + with threadLock: + nonlocal activity_counter + activity_counter += 1 + + def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int): + for _ in range(activity_count): + yield ctx.call_activity(increment) + + def parent_orchestrator(ctx: task.OrchestrationContext, count: int): + # Fan out to multiple sub-orchestrations + tasks = [] + for _ in range(count): + tasks.append(ctx.call_sub_orchestrator( + orchestrator_child, input=3)) + # Wait for all sub-orchestrations to complete + yield task.when_all(tasks) + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_activity(increment) + w.add_orchestrator(orchestrator_child) + w.add_orchestrator(parent_orchestrator) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert activity_counter == 30 + + +def test_wait_for_multiple_external_events(): + def orchestrator(ctx: task.OrchestrationContext, _): + a = yield ctx.wait_for_external_event('A') + b = yield ctx.wait_for_external_event('B') + c = yield 
ctx.wait_for_external_event('C') + return [a, b, c] + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(orchestrator) + w.start() + + # Start the orchestration and immediately raise events to it. + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(orchestrator) + task_hub_client.raise_orchestration_event(id, 'A', data='a') + task_hub_client.raise_orchestration_event(id, 'B', data='b') + task_hub_client.raise_orchestration_event(id, 'C', data='c') + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(['a', 'b', 'c']) + + +# @pytest.mark.parametrize("raise_event", [True, False]) +# def test_wait_for_external_event_timeout(raise_event: bool): +# def orchestrator(ctx: task.OrchestrationContext, _): +# approval: task.Task[bool] = ctx.wait_for_external_event('Approval') +# timeout = ctx.create_timer(timedelta(seconds=3)) +# winner = yield task.when_any([approval, timeout]) +# if winner == approval: +# return "approved" +# else: +# return "timed out" + +# # Start a worker, which will connect to the sidecar in a background thread +# with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) as w: +# w.add_orchestrator(orchestrator) +# w.start() + +# # Start the orchestration and immediately raise events to it. 
+# task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) +# id = task_hub_client.schedule_new_orchestration(orchestrator) +# if raise_event: +# task_hub_client.raise_orchestration_event(id, 'Approval') +# state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + +# assert state is not None +# assert state.runtime_status == client.OrchestrationStatus.COMPLETED +# if raise_event: +# assert state.serialized_output == json.dumps("approved") +# else: +# assert state.serialized_output == json.dumps("timed out") + + +# def test_suspend_and_resume(): +# def orchestrator(ctx: task.OrchestrationContext, _): +# result = yield ctx.wait_for_external_event("my_event") +# return result + +# # Start a worker, which will connect to the sidecar in a background thread +# with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) as w: +# w.add_orchestrator(orchestrator) +# w.start() + +# task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) +# id = task_hub_client.schedule_new_orchestration(orchestrator) +# state = task_hub_client.wait_for_orchestration_start(id, timeout=30) +# assert state is not None + +# # Suspend the orchestration and wait for it to go into the SUSPENDED state +# task_hub_client.suspend_orchestration(id) +# counter = 0 +# while state.runtime_status == client.OrchestrationStatus.RUNNING and counter < 1200: +# time.sleep(0.1) +# state = task_hub_client.get_orchestration_state(id) +# assert state is not None +# counter+=1 +# assert state.runtime_status == client.OrchestrationStatus.SUSPENDED + +# # Raise an event to the orchestration and confirm that it does NOT complete +# task_hub_client.raise_orchestration_event(id, "my_event", data=42) +# try: +# state = task_hub_client.wait_for_orchestration_completion(id, timeout=3) 
+# assert False, "Orchestration should not have completed" +# except TimeoutError: +# pass + +# # Resume the orchestration and wait for it to complete +# task_hub_client.resume_orchestration(id) +# state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) +# assert state is not None +# assert state.runtime_status == client.OrchestrationStatus.COMPLETED +# assert state.serialized_output == json.dumps(42) + + +def test_terminate(): + def orchestrator(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(orchestrator) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(orchestrator) + state = task_hub_client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.RUNNING + + task_hub_client.terminate_orchestration(id, output="some reason for termination") + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.TERMINATED + assert state.serialized_output == json.dumps("some reason for termination") + +def test_terminate_recursive(): + def root(ctx: task.OrchestrationContext, _): + result = yield ctx.call_sub_orchestrator(child) + return result + def child(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, 
token_credential=None) as w: + w.add_orchestrator(root) + w.add_orchestrator(child) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(root) + state = task_hub_client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.RUNNING + + # Terminate root orchestration(recursive set to True by default) + task_hub_client.terminate_orchestration(id, output="some reason for termination") + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.TERMINATED + + # Verify that child orchestration is also terminated + c = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.TERMINATED + + task_hub_client.purge_orchestration(id) + state = task_hub_client.get_orchestration_state(id) + assert state is None + + +# def test_continue_as_new(): +# all_results = [] + +# def orchestrator(ctx: task.OrchestrationContext, input: int): +# result = yield ctx.wait_for_external_event("my_event") +# if not ctx.is_replaying: +# # NOTE: Real orchestrations should never interact with nonlocal variables like this. 
+# nonlocal all_results +# all_results.append(result) + +# if len(all_results) <= 4: +# ctx.continue_as_new(max(all_results), save_events=True) +# else: +# return all_results + +# # Start a worker, which will connect to the sidecar in a background thread +# with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) as w: +# w.add_orchestrator(orchestrator) +# w.start() + +# task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) +# id = task_hub_client.schedule_new_orchestration(orchestrator, input=0) +# task_hub_client.raise_orchestration_event(id, "my_event", data=1) +# task_hub_client.raise_orchestration_event(id, "my_event", data=2) +# task_hub_client.raise_orchestration_event(id, "my_event", data=3) +# task_hub_client.raise_orchestration_event(id, "my_event", data=4) +# task_hub_client.raise_orchestration_event(id, "my_event", data=5) + +# state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) +# assert state is not None +# assert state.runtime_status == client.OrchestrationStatus.COMPLETED +# assert state.serialized_output == json.dumps(all_results) +# assert state.serialized_input == json.dumps(4) +# assert all_results == [1, 2, 3, 4, 5] + + +# NOTE: This test fails when running against durabletask-go with sqlite because the sqlite backend does not yet +# support orchestration ID reuse. This gap is being tracked here: +# https://github.com/microsoft/durabletask-go/issues/42 +def test_retry_policies(): + # This test verifies that the retry policies are working as expected. + # It does this by creating an orchestration that calls a sub-orchestrator, + # which in turn calls an activity that always fails. + # In this test, the retry policies are added, and the orchestration + # should still fail. 
But, number of times the sub-orchestrator and activity + # is called should increase as per the retry policies. + + child_orch_counter = 0 + throw_activity_counter = 0 + + # Second setup: With retry policies + retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + backoff_coefficient=1, + max_retry_interval=timedelta(seconds=10), + retry_timeout=timedelta(seconds=30)) + + def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _): + yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy) + + def child_orchestrator_with_retry(ctx: task.OrchestrationContext, _): + nonlocal child_orch_counter + if not ctx.is_replaying: + # NOTE: Real orchestrations should never interact with nonlocal variables like this. + # This is done only for testing purposes. + child_orch_counter += 1 + yield ctx.call_activity(throw_activity_with_retry, retry_policy=retry_policy) + + def throw_activity_with_retry(ctx: task.ActivityContext, _): + nonlocal throw_activity_counter + throw_activity_counter += 1 + raise RuntimeError("Kah-BOOOOM!!!") + + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(parent_orchestrator_with_retry) + w.add_orchestrator(child_orchestrator_with_retry) + w.add_activity(throw_activity_with_retry) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.FAILED + assert state.failure_details is not None + assert state.failure_details.error_type == "TaskFailedError" + assert state.failure_details.message.startswith("Sub-orchestration task #1 
failed:") + assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") + assert state.failure_details.stack_trace is not None + assert throw_activity_counter == 9 + assert child_orch_counter == 3 + + +def test_retry_timeout(): + # This test verifies that the retry timeout is working as expected. + # Max number of attempts is 5 and retry timeout is 14 seconds. + # Total seconds consumed till 4th attempt is 1 + 2 + 4 + 8 = 15 seconds. + # So, the 5th attempt should not be made and the orchestration should fail. + throw_activity_counter = 0 + retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=5, + backoff_coefficient=2, + max_retry_interval=timedelta(seconds=10), + retry_timeout=timedelta(seconds=14)) + + def mock_orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_activity(throw_activity, retry_policy=retry_policy) + + def throw_activity(ctx: task.ActivityContext, _): + nonlocal throw_activity_counter + throw_activity_counter += 1 + raise RuntimeError("Kah-BOOOOM!!!") + + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(mock_orchestrator) + w.add_activity(throw_activity) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(mock_orchestrator) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.FAILED + assert state.failure_details is not None + assert state.failure_details.error_type == "TaskFailedError" + assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") + assert state.failure_details.stack_trace is not None + assert throw_activity_counter == 4 + +def test_custom_status(): + + def 
empty_orchestrator(ctx: task.OrchestrationContext, _): + ctx.set_custom_status("foobaz") + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(empty_orchestrator) + w.start() + + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = c.schedule_new_orchestration(empty_orchestrator) + state = c.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.name == task.get_name(empty_orchestrator) + assert state.instance_id == id + assert state.failure_details is None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_input is None + assert state.serialized_output is None + assert state.serialized_custom_status == "\"foobaz\"" diff --git a/tests/test_activity_executor.py b/tests/durabletask/test_activity_executor.py similarity index 100% rename from tests/test_activity_executor.py rename to tests/durabletask/test_activity_executor.py diff --git a/tests/test_client.py b/tests/durabletask/test_client.py similarity index 100% rename from tests/test_client.py rename to tests/durabletask/test_client.py diff --git a/tests/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py similarity index 100% rename from tests/test_orchestration_e2e.py rename to tests/durabletask/test_orchestration_e2e.py diff --git a/tests/test_orchestration_executor.py b/tests/durabletask/test_orchestration_executor.py similarity index 100% rename from tests/test_orchestration_executor.py rename to tests/durabletask/test_orchestration_executor.py From 62d20146c9da8a0c7e9088c7469c1389470270a0 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Wed, 26 Mar 2025 13:29:33 -0700 Subject: [PATCH 10/20] Add missing protobuf dependency Signed-off-by: Albert Callarisa --- 
.../durabletask/azuremanaged/internal/py.typed | 0 durabletask-azuremanaged/durabletask/azuremanaged/py.typed | 0 durabletask-azuremanaged/pyproject.toml | 4 ++-- pyproject.toml | 3 ++- 4 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/internal/py.typed create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/py.typed diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/py.typed b/durabletask-azuremanaged/durabletask/azuremanaged/internal/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/py.typed b/durabletask-azuremanaged/durabletask/azuremanaged/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml index c4c8a96..9e724e4 100644 --- a/durabletask-azuremanaged/pyproject.toml +++ b/durabletask-azuremanaged/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask.azuremanaged" -version = "0.1.2" +version = "0.1.3" description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" keywords = [ "durable", @@ -26,7 +26,7 @@ requires-python = ">=3.9" license = {file = "LICENSE"} readme = "README.md" dependencies = [ - "durabletask>=0.2.0", + "durabletask>=0.2.1", "azure-identity>=1.19.0" ] diff --git a/pyproject.toml b/pyproject.toml index d3d9429..60a9d37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask" -version = "0.2.0" +version = "0.2.1" description = "A Durable Task Client SDK for Python" keywords = [ "durable", @@ -26,6 +26,7 @@ license = {file = "LICENSE"} readme = "README.md" dependencies = [ "grpcio", + "protobuf" ] [project.urls] From 04fe99113baa8dc032c2ef8b42c2c5d9a0116283 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Tue, 6 
May 2025 07:32:00 -0700 Subject: [PATCH 11/20] Add user agent (#49) Signed-off-by: Albert Callarisa --- .../durabletask/azuremanaged/client.py | 8 +- .../internal/durabletask_grpc_interceptor.py | 21 +++- .../durabletask/azuremanaged/worker.py | 8 +- .../test_durabletask_grpc_interceptor.py | 108 ++++++++++++++++++ 4 files changed, 134 insertions(+), 11 deletions(-) create mode 100644 tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/client.py b/durabletask-azuremanaged/durabletask/azuremanaged/client.py index 1d8cecd..e1c2445 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/client.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/client.py @@ -1,11 +1,13 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from azure.core.credentials import TokenCredential from typing import Optional -from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ - DTSDefaultClientInterceptorImpl +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( + DTSDefaultClientInterceptorImpl, +) from durabletask.client import TaskHubGrpcClient diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py index 077905e..fa1459f 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py @@ -1,15 +1,17 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-import grpc +from importlib.metadata import version from typing import Optional +import grpc from azure.core.credentials import TokenCredential -from durabletask.azuremanaged.internal.access_token_manager import \ - AccessTokenManager +from durabletask.azuremanaged.internal.access_token_manager import AccessTokenManager from durabletask.internal.grpc_interceptor import ( - DefaultClientInterceptorImpl, _ClientCallDetails) + DefaultClientInterceptorImpl, + _ClientCallDetails, +) class DTSDefaultClientInterceptorImpl (DefaultClientInterceptorImpl): @@ -18,7 +20,16 @@ class DTSDefaultClientInterceptorImpl (DefaultClientInterceptorImpl): interceptor to add additional headers to all calls as needed.""" def __init__(self, token_credential: Optional[TokenCredential], taskhub_name: str): - self._metadata = [("taskhub", taskhub_name)] + try: + # Get the version of the azuremanaged package + sdk_version = version('durabletask-azuremanaged') + except Exception: + # Fallback if version cannot be determined + sdk_version = "unknown" + user_agent = f"durabletask-python/{sdk_version}" + self._metadata = [ + ("taskhub", taskhub_name), + ("x-user-agent", user_agent)] # 'user-agent' is a reserved header in grpc, so we use 'x-user-agent' instead super().__init__(self._metadata) if token_credential is not None: diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py index 8bdff3d..fd3b1e4 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py @@ -1,11 +1,13 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-from azure.core.credentials import TokenCredential from typing import Optional -from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ - DTSDefaultClientInterceptorImpl +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( + DTSDefaultClientInterceptorImpl, +) from durabletask.worker import TaskHubGrpcWorker diff --git a/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py b/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py new file mode 100644 index 0000000..62978f9 --- /dev/null +++ b/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import threading +import unittest +from concurrent import futures +from importlib.metadata import version + +import grpc + +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( + DTSDefaultClientInterceptorImpl, +) +from durabletask.internal import orchestrator_service_pb2 as pb +from durabletask.internal import orchestrator_service_pb2_grpc as stubs + + +class MockTaskHubSidecarServiceServicer(stubs.TaskHubSidecarServiceServicer): + """Mock implementation of the TaskHubSidecarService for testing.""" + + def __init__(self): + self.captured_metadata = {} + self.requests_received = 0 + + def GetInstance(self, request, context): + """Implementation of GetInstance that captures the metadata.""" + # Store all metadata key-value pairs from the context + for key, value in context.invocation_metadata(): + self.captured_metadata[key] = value + + self.requests_received += 1 + + # Return a mock response + response = pb.GetInstanceResponse(exists=False) + return response + + +class TestDurableTaskGrpcInterceptor(unittest.TestCase): + """Tests for the DTSDefaultClientInterceptorImpl class.""" + + @classmethod 
+ def setUpClass(cls): + # Start a real gRPC server on a free port + cls.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + cls.port = cls.server.add_insecure_port('[::]:0') # Bind to a random free port + cls.server_address = f"localhost:{cls.port}" + + # Add our mock service implementation to the server + cls.mock_servicer = MockTaskHubSidecarServiceServicer() + stubs.add_TaskHubSidecarServiceServicer_to_server(cls.mock_servicer, cls.server) + + # Start the server in a background thread + cls.server.start() + + @classmethod + def tearDownClass(cls): + cls.server.stop(grace=None) + + def test_user_agent_metadata_passed_in_request(self): + """Test that the user agent metadata is correctly passed in gRPC requests.""" + # Create a client that connects to our mock server + # Note: secure_channel is False and token_credential is None as specified + task_hub_client = DurableTaskSchedulerClient( + host_address=self.server_address, + secure_channel=False, + taskhub="test-taskhub", + token_credential=None + ) + + # Make a client call that will trigger our interceptor + task_hub_client.get_orchestration_state("test-instance-id") + + # Verify the request was received by our mock server + self.assertEqual(1, self.mock_servicer.requests_received, "Expected one request to be received") + + # Check if our custom x-user-agent header was correctly set + self.assertIn("x-user-agent", self.mock_servicer.captured_metadata, "x-user-agent header not found") + + # Get what we expect our user agent to be + try: + expected_version = version('durabletask-azuremanaged') + except Exception: + expected_version = "unknown" + + expected_user_agent = f"durabletask-python/{expected_version}" + self.assertEqual( + expected_user_agent, + self.mock_servicer.captured_metadata["x-user-agent"], + f"Expected x-user-agent header to be '{expected_user_agent}'" + ) + + # Check if the taskhub header was correctly set + self.assertIn("taskhub", self.mock_servicer.captured_metadata, "taskhub 
header not found") + self.assertEqual("test-taskhub", self.mock_servicer.captured_metadata["taskhub"]) + + # Verify the standard gRPC user-agent is different from our custom one + # Note: gRPC automatically adds its own "user-agent" header + self.assertIn("user-agent", self.mock_servicer.captured_metadata, "gRPC user-agent header not found") + self.assertNotEqual( + self.mock_servicer.captured_metadata["user-agent"], + self.mock_servicer.captured_metadata["x-user-agent"], + "gRPC user-agent should be different from our custom x-user-agent" + ) + + +if __name__ == "__main__": + unittest.main() From e6be3d6c8fc6228551a1c9a7fad91627435875d1 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Tue, 20 May 2025 11:22:16 -0700 Subject: [PATCH 12/20] Bump azuremanaged version for release Signed-off-by: Albert Callarisa --- dev-requirements.txt | 2 +- durabletask-azuremanaged/pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index 119f072..b3ff6f7 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1 +1 @@ -grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 1.26.X is used which has breaking changes for Python +grpcio-tools diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml index 9e724e4..5962285 100644 --- a/durabletask-azuremanaged/pyproject.toml +++ b/durabletask-azuremanaged/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask.azuremanaged" -version = "0.1.3" +version = "0.1.4" description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" keywords = [ "durable", From c9704b39de0d41f71853c4a0764bfa161cd9c871 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Tue, 3 Jun 2025 10:21:21 -0700 Subject: [PATCH 13/20] Fix and improve connection handling, add concurrency options, prep for release (#50) * Reconnect upon connection error 
* concurrency * Test updates * More updates * more concurrency stuff * final touches * fix import * update log level * fix exports * more fixup * test updateS * more test imports * fix github workflow pytest * cleanup tests * Python 3.9 specific test fix * fixup reconnection for new concurrency model * autopep8 * Remove existing duplicate import Signed-off-by: Albert Callarisa --- .github/workflows/pr-validation.yml | 3 +- CHANGELOG.md | 14 +- .../durabletask/azuremanaged/worker.py | 53 +- durabletask-azuremanaged/pyproject.toml | 6 +- durabletask/__init__.py | 3 + durabletask/worker.py | 1037 ++++++++++++++--- examples/README.md | 2 +- pyproject.toml | 2 +- tests/durabletask/test_client.py | 7 +- tests/durabletask/test_concurrency_options.py | 96 ++ .../test_worker_concurrency_loop.py | 140 +++ .../test_worker_concurrency_loop_async.py | 80 ++ 12 files changed, 1241 insertions(+), 202 deletions(-) create mode 100644 tests/durabletask/test_concurrency_options.py create mode 100644 tests/durabletask/test_worker_concurrency_loop.py create mode 100644 tests/durabletask/test_worker_concurrency_loop_async.py diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index dddcc53..1d14d83 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -25,11 +25,12 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install durabletask dependencies + - name: Install durabletask dependencies and the library itself in editable mode run: | python -m pip install --upgrade pip pip install flake8 pytest pip install -r requirements.txt + pip install -e . - name: Install durabletask-azuremanaged dependencies working-directory: examples/dts run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index 13b0e69..6921faa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,13 +5,23 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## v0.2.0 (Unreleased) +## v0.3.0 + +### New + +- Added `ConcurrencyOptions` class for fine-grained concurrency control with separate limits for activities and orchestrations. The thread pool worker count can also be configured. + +### Fixed + +- Fixed an issue where a worker could not recover after its connection was interrupted or severed + +## v0.2.1 ### New - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) -- Added new `durabletask-azuremanaged` package for use with the [Durable Task Scheduler](https://techcommunity.microsoft.com/blog/appsonazureblog/announcing-limited-early-access-of-the-durable-task-scheduler-for-azure-durable-/4286526) - by [@RyanLettieri](https://github.com/RyanLettieri) +- Added new `durabletask-azuremanaged` package for use with the [Durable Task Scheduler](https://learn.microsoft.com/azure/azure-functions/durable/durable-task-scheduler/durable-task-scheduler) - by [@RyanLettieri](https://github.com/RyanLettieri) ### Changes diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py index fd3b1e4..1135ae7 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py @@ -5,19 +5,59 @@ from azure.core.credentials import TokenCredential -from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( - DTSDefaultClientInterceptorImpl, -) -from durabletask.worker import TaskHubGrpcWorker +from 
durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ + DTSDefaultClientInterceptorImpl +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker # Worker class used for Durable Task Scheduler (DTS) class DurableTaskSchedulerWorker(TaskHubGrpcWorker): + """A worker implementation for Azure Durable Task Scheduler (DTS). + + This class extends TaskHubGrpcWorker to provide integration with Azure's + Durable Task Scheduler service. It handles authentication via Azure credentials + and configures the necessary gRPC interceptors for DTS communication. + + Args: + host_address (str): The gRPC endpoint address of the DTS service. + taskhub (str): The name of the task hub. Cannot be empty. + token_credential (Optional[TokenCredential]): Azure credential for authentication. + If None, anonymous authentication will be used. + secure_channel (bool, optional): Whether to use a secure gRPC channel (TLS). + Defaults to True. + concurrency_options (Optional[ConcurrencyOptions], optional): Configuration + for controlling worker concurrency limits. If None, default concurrency + settings will be used. + + Raises: + ValueError: If taskhub is empty or None. + + Example: + >>> from azure.identity import DefaultAzureCredential + >>> from durabletask.azuremanaged import DurableTaskSchedulerWorker + >>> from durabletask.worker import ConcurrencyOptions + >>> + >>> credential = DefaultAzureCredential() + >>> concurrency = ConcurrencyOptions(max_concurrent_activities=10) + >>> worker = DurableTaskSchedulerWorker( + ... host_address="my-dts-service.azure.com:443", + ... taskhub="my-task-hub", + ... token_credential=credential, + ... concurrency_options=concurrency + ... ) + + Note: + This worker automatically configures DTS-specific gRPC interceptors + for authentication and task hub routing. The parent class metadata + parameter is set to None since authentication is handled by the + DTS interceptor. 
+ """ def __init__(self, *, host_address: str, taskhub: str, token_credential: Optional[TokenCredential], - secure_channel: bool = True): + secure_channel: bool = True, + concurrency_options: Optional[ConcurrencyOptions] = None): if not taskhub: raise ValueError("The taskhub value cannot be empty.") @@ -30,4 +70,5 @@ def __init__(self, *, host_address=host_address, secure_channel=secure_channel, metadata=None, - interceptors=interceptors) + interceptors=interceptors, + concurrency_options=concurrency_options) diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml index 5962285..250cfcc 100644 --- a/durabletask-azuremanaged/pyproject.toml +++ b/durabletask-azuremanaged/pyproject.toml @@ -9,8 +9,8 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask.azuremanaged" -version = "0.1.4" -description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" +version = "0.2.0" +description = "Durable Task Python SDK provider implementation for the Azure Durable Task Scheduler" keywords = [ "durable", "task", @@ -26,7 +26,7 @@ requires-python = ">=3.9" license = {file = "LICENSE"} readme = "README.md" dependencies = [ - "durabletask>=0.2.1", + "durabletask>=0.3.0", "azure-identity>=1.19.0" ] diff --git a/durabletask/__init__.py b/durabletask/__init__.py index a37823c..88af82b 100644 --- a/durabletask/__init__.py +++ b/durabletask/__init__.py @@ -3,5 +3,8 @@ """Durable Task SDK for Python""" +from durabletask.worker import ConcurrencyOptions + +__all__ = ["ConcurrencyOptions"] PACKAGE_NAME = "durabletask" diff --git a/durabletask/worker.py b/durabletask/worker.py index 2c31e52..b433a83 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -1,8 +1,12 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-import concurrent.futures +import asyncio +import inspect import logging +import os +import random +from concurrent.futures import ThreadPoolExecutor from datetime import datetime, timedelta from threading import Event, Thread from types import GeneratorType @@ -12,19 +16,63 @@ from google.protobuf import empty_pb2 import durabletask.internal.helpers as ph -import durabletask.internal.helpers as pbh import durabletask.internal.orchestrator_service_pb2 as pb import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared from durabletask import task from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl -TInput = TypeVar('TInput') -TOutput = TypeVar('TOutput') +TInput = TypeVar("TInput") +TOutput = TypeVar("TOutput") + + +class ConcurrencyOptions: + """Configuration options for controlling concurrency of different work item types and the thread pool size. + + This class provides fine-grained control over concurrent processing limits for + activities, orchestrations and the thread pool size. + """ + + def __init__( + self, + maximum_concurrent_activity_work_items: Optional[int] = None, + maximum_concurrent_orchestration_work_items: Optional[int] = None, + maximum_thread_pool_workers: Optional[int] = None, + ): + """Initialize concurrency options. + + Args: + maximum_concurrent_activity_work_items: Maximum number of activity work items + that can be processed concurrently. Defaults to 100 * processor_count. + maximum_concurrent_orchestration_work_items: Maximum number of orchestration work items + that can be processed concurrently. Defaults to 100 * processor_count. + maximum_thread_pool_workers: Maximum number of thread pool workers to use. 
+ """ + processor_count = os.cpu_count() or 1 + default_concurrency = 100 * processor_count + # see https://docs.python.org/3/library/concurrent.futures.html + default_max_workers = processor_count + 4 + + self.maximum_concurrent_activity_work_items = ( + maximum_concurrent_activity_work_items + if maximum_concurrent_activity_work_items is not None + else default_concurrency + ) + self.maximum_concurrent_orchestration_work_items = ( + maximum_concurrent_orchestration_work_items + if maximum_concurrent_orchestration_work_items is not None + else default_concurrency + ) + + self.maximum_thread_pool_workers = ( + maximum_thread_pool_workers + if maximum_thread_pool_workers is not None + else default_max_workers + ) -class _Registry: +class _Registry: orchestrators: dict[str, task.Orchestrator] activities: dict[str, task.Activity] @@ -34,7 +82,7 @@ def __init__(self): def add_orchestrator(self, fn: task.Orchestrator) -> str: if fn is None: - raise ValueError('An orchestrator function argument is required.') + raise ValueError("An orchestrator function argument is required.") name = task.get_name(fn) self.add_named_orchestrator(name, fn) @@ -42,7 +90,7 @@ def add_orchestrator(self, fn: task.Orchestrator) -> str: def add_named_orchestrator(self, name: str, fn: task.Orchestrator) -> None: if not name: - raise ValueError('A non-empty orchestrator name is required.') + raise ValueError("A non-empty orchestrator name is required.") if name in self.orchestrators: raise ValueError(f"A '{name}' orchestrator already exists.") @@ -53,7 +101,7 @@ def get_orchestrator(self, name: str) -> Optional[task.Orchestrator]: def add_activity(self, fn: task.Activity) -> str: if fn is None: - raise ValueError('An activity function argument is required.') + raise ValueError("An activity function argument is required.") name = task.get_name(fn) self.add_named_activity(name, fn) @@ -61,7 +109,7 @@ def add_activity(self, fn: task.Activity) -> str: def add_named_activity(self, name: str, fn: 
task.Activity) -> None: if not name: - raise ValueError('A non-empty activity name is required.') + raise ValueError("A non-empty activity name is required.") if name in self.activities: raise ValueError(f"A '{name}' activity already exists.") @@ -73,32 +121,125 @@ def get_activity(self, name: str) -> Optional[task.Activity]: class OrchestratorNotRegisteredError(ValueError): """Raised when attempting to start an orchestration that is not registered""" + pass class ActivityNotRegisteredError(ValueError): """Raised when attempting to call an activity that is not registered""" + pass class TaskHubGrpcWorker: + """A gRPC-based worker for processing durable task orchestrations and activities. + + This worker connects to a Durable Task backend service via gRPC to receive and process + work items including orchestration functions and activity functions. It provides + concurrent execution capabilities with configurable limits and automatic retry handling. + + The worker manages the complete lifecycle: + - Registers orchestrator and activity functions + - Connects to the gRPC backend service + - Receives work items and executes them concurrently + - Handles failures, retries, and state management + - Provides logging and monitoring capabilities + + Args: + host_address (Optional[str], optional): The gRPC endpoint address of the backend service. + Defaults to the value from environment variables or localhost. + metadata (Optional[list[tuple[str, str]]], optional): gRPC metadata to include with + requests. Used for authentication and routing. Defaults to None. + log_handler (optional): Custom logging handler for worker logs. Defaults to None. + log_formatter (Optional[logging.Formatter], optional): Custom log formatter. + Defaults to None. + secure_channel (bool, optional): Whether to use a secure gRPC channel (TLS). + Defaults to False. + interceptors (Optional[Sequence[shared.ClientInterceptor]], optional): Custom gRPC + interceptors to apply to the channel. 
Defaults to None. + concurrency_options (Optional[ConcurrencyOptions], optional): Configuration for + controlling worker concurrency limits. If None, default settings are used. + + Attributes: + concurrency_options (ConcurrencyOptions): The current concurrency configuration. + + Example: + Basic worker setup: + + >>> from durabletask.worker import TaskHubGrpcWorker, ConcurrencyOptions + >>> + >>> # Create worker with custom concurrency settings + >>> concurrency = ConcurrencyOptions( + ... maximum_concurrent_activity_work_items=50, + ... maximum_concurrent_orchestration_work_items=20 + ... ) + >>> worker = TaskHubGrpcWorker( + ... host_address="localhost:4001", + ... concurrency_options=concurrency + ... ) + >>> + >>> # Register functions + >>> @worker.add_orchestrator + ... def my_orchestrator(context, input): + ... result = yield context.call_activity("my_activity", input="hello") + ... return result + >>> + >>> @worker.add_activity + ... def my_activity(context, input): + ... return f"Processed: {input}" + >>> + >>> # Start the worker + >>> worker.start() + >>> # ... worker runs in background thread + >>> worker.stop() + + Using as context manager: + + >>> with TaskHubGrpcWorker() as worker: + ... worker.add_orchestrator(my_orchestrator) + ... worker.add_activity(my_activity) + ... worker.start() + ... # Worker automatically stops when exiting context + + Raises: + RuntimeError: If attempting to add orchestrators/activities while the worker is running, + or if starting a worker that is already running. + OrchestratorNotRegisteredError: If an orchestration work item references an + unregistered orchestrator function. + ActivityNotRegisteredError: If an activity work item references an unregistered + activity function. 
+ """ + _response_stream: Optional[grpc.Future] = None _interceptors: Optional[list[shared.ClientInterceptor]] = None - def __init__(self, *, - host_address: Optional[str] = None, - metadata: Optional[list[tuple[str, str]]] = None, - log_handler=None, - log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False, - interceptors: Optional[Sequence[shared.ClientInterceptor]] = None): + def __init__( + self, + *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler=None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, + concurrency_options: Optional[ConcurrencyOptions] = None, + ): self._registry = _Registry() - self._host_address = host_address if host_address else shared.get_default_host_address() + self._host_address = ( + host_address if host_address else shared.get_default_host_address() + ) self._logger = shared.get_logger("worker", log_handler, log_formatter) self._shutdown = Event() self._is_running = False self._secure_channel = secure_channel + # Use provided concurrency options or create default ones + self._concurrency_options = ( + concurrency_options + if concurrency_options is not None + else ConcurrencyOptions() + ) + # Determine the interceptors to use if interceptors is not None: self._interceptors = list(interceptors) @@ -109,6 +250,13 @@ def __init__(self, *, else: self._interceptors = None + self._async_worker_manager = _AsyncWorkerManager(self._concurrency_options) + + @property + def concurrency_options(self) -> ConcurrencyOptions: + """Get the current concurrency options for this worker.""" + return self._concurrency_options + def __enter__(self): return self @@ -118,72 +266,223 @@ def __exit__(self, type, value, traceback): def add_orchestrator(self, fn: task.Orchestrator) -> str: """Registers an orchestrator function with the worker.""" if self._is_running: - raise 
RuntimeError('Orchestrators cannot be added while the worker is running.') + raise RuntimeError( + "Orchestrators cannot be added while the worker is running." + ) return self._registry.add_orchestrator(fn) def add_activity(self, fn: task.Activity) -> str: """Registers an activity function with the worker.""" if self._is_running: - raise RuntimeError('Activities cannot be added while the worker is running.') + raise RuntimeError( + "Activities cannot be added while the worker is running." + ) return self._registry.add_activity(fn) def start(self): """Starts the worker on a background thread and begins listening for work items.""" - channel = shared.get_grpc_channel(self._host_address, self._secure_channel, self._interceptors) - stub = stubs.TaskHubSidecarServiceStub(channel) - if self._is_running: - raise RuntimeError('The worker is already running.') + raise RuntimeError("The worker is already running.") def run_loop(): - # TODO: Investigate whether asyncio could be used to enable greater concurrency for async activity - # functions. We'd need to know ahead of time whether a function is async or not. - # TODO: Max concurrency configuration settings - with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor: - while not self._shutdown.is_set(): - try: - # send a "Hello" message to the sidecar to ensure that it's listening - stub.Hello(empty_pb2.Empty()) - - # stream work items - self._response_stream = stub.GetWorkItems(pb.GetWorkItemsRequest()) - self._logger.info(f'Successfully connected to {self._host_address}. Waiting for work items...') - - # The stream blocks until either a work item is received or the stream is canceled - # by another thread (see the stop() method). 
- for work_item in self._response_stream: # type: ignore - request_type = work_item.WhichOneof('request') - self._logger.debug(f'Received "{request_type}" work item') - if work_item.HasField('orchestratorRequest'): - executor.submit(self._execute_orchestrator, work_item.orchestratorRequest, stub, work_item.completionToken) - elif work_item.HasField('activityRequest'): - executor.submit(self._execute_activity, work_item.activityRequest, stub, work_item.completionToken) - elif work_item.HasField('healthPing'): - pass # no-op - else: - self._logger.warning(f'Unexpected work item type: {request_type}') - - except grpc.RpcError as rpc_error: - if rpc_error.code() == grpc.StatusCode.CANCELLED: # type: ignore - self._logger.info(f'Disconnected from {self._host_address}') - elif rpc_error.code() == grpc.StatusCode.UNAVAILABLE: # type: ignore - self._logger.warning( - f'The sidecar at address {self._host_address} is unavailable - will continue retrying') - else: - self._logger.warning(f'Unexpected error: {rpc_error}') - except Exception as ex: - self._logger.warning(f'Unexpected error: {ex}') - - # CONSIDER: exponential backoff - self._shutdown.wait(5) - self._logger.info("No longer listening for work items") - return + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + loop.run_until_complete(self._async_run_loop()) self._logger.info(f"Starting gRPC worker that connects to {self._host_address}") self._runLoop = Thread(target=run_loop) self._runLoop.start() self._is_running = True + async def _async_run_loop(self): + worker_task = asyncio.create_task(self._async_worker_manager.run()) + # Connection state management for retry fix + current_channel = None + current_stub = None + current_reader_thread = None + conn_retry_count = 0 + conn_max_retry_delay = 60 + + def create_fresh_connection(): + nonlocal current_channel, current_stub, conn_retry_count + if current_channel: + try: + current_channel.close() + except Exception: + pass + current_channel = None + 
current_stub = None + try: + current_channel = shared.get_grpc_channel( + self._host_address, self._secure_channel, self._interceptors + ) + current_stub = stubs.TaskHubSidecarServiceStub(current_channel) + current_stub.Hello(empty_pb2.Empty()) + conn_retry_count = 0 + self._logger.info(f"Created fresh connection to {self._host_address}") + except Exception as e: + self._logger.warning(f"Failed to create connection: {e}") + current_channel = None + current_stub = None + raise + + def invalidate_connection(): + nonlocal current_channel, current_stub, current_reader_thread + # Cancel the response stream first to signal the reader thread to stop + if self._response_stream is not None: + try: + self._response_stream.cancel() + except Exception: + pass + self._response_stream = None + + # Wait for the reader thread to finish + if current_reader_thread is not None: + try: + current_reader_thread.join(timeout=2) + if current_reader_thread.is_alive(): + self._logger.warning("Stream reader thread did not shut down gracefully") + except Exception: + pass + current_reader_thread = None + + # Close the channel + if current_channel: + try: + current_channel.close() + except Exception: + pass + current_channel = None + current_stub = None + + def should_invalidate_connection(rpc_error): + error_code = rpc_error.code() # type: ignore + connection_level_errors = { + grpc.StatusCode.UNAVAILABLE, + grpc.StatusCode.DEADLINE_EXCEEDED, + grpc.StatusCode.CANCELLED, + grpc.StatusCode.UNAUTHENTICATED, + grpc.StatusCode.ABORTED, + } + return error_code in connection_level_errors + + while not self._shutdown.is_set(): + if current_stub is None: + try: + create_fresh_connection() + except Exception: + conn_retry_count += 1 + delay = min( + conn_max_retry_delay, + (2 ** min(conn_retry_count, 6)) + random.uniform(0, 1), + ) + self._logger.warning( + f"Connection failed, retrying in {delay:.2f} seconds (attempt {conn_retry_count})" + ) + if self._shutdown.wait(delay): + break + continue + try: 
+ assert current_stub is not None + stub = current_stub + get_work_items_request = pb.GetWorkItemsRequest( + maxConcurrentOrchestrationWorkItems=self._concurrency_options.maximum_concurrent_orchestration_work_items, + maxConcurrentActivityWorkItems=self._concurrency_options.maximum_concurrent_activity_work_items, + ) + self._response_stream = stub.GetWorkItems(get_work_items_request) + self._logger.info( + f"Successfully connected to {self._host_address}. Waiting for work items..." + ) + + # Use a thread to read from the blocking gRPC stream and forward to asyncio + import queue + + work_item_queue = queue.Queue() + + def stream_reader(): + try: + for work_item in self._response_stream: + work_item_queue.put(work_item) + except Exception as e: + work_item_queue.put(e) + + import threading + + current_reader_thread = threading.Thread(target=stream_reader, daemon=True) + current_reader_thread.start() + loop = asyncio.get_running_loop() + while not self._shutdown.is_set(): + try: + work_item = await loop.run_in_executor( + None, work_item_queue.get + ) + if isinstance(work_item, Exception): + raise work_item + request_type = work_item.WhichOneof("request") + self._logger.debug(f'Received "{request_type}" work item') + if work_item.HasField("orchestratorRequest"): + self._async_worker_manager.submit_orchestration( + self._execute_orchestrator, + work_item.orchestratorRequest, + stub, + work_item.completionToken, + ) + elif work_item.HasField("activityRequest"): + self._async_worker_manager.submit_activity( + self._execute_activity, + work_item.activityRequest, + stub, + work_item.completionToken, + ) + elif work_item.HasField("healthPing"): + pass + else: + self._logger.warning( + f"Unexpected work item type: {request_type}" + ) + except Exception as e: + self._logger.warning(f"Error in work item stream: {e}") + raise e + current_reader_thread.join(timeout=1) + self._logger.info("Work item stream ended normally") + except grpc.RpcError as rpc_error: + should_invalidate 
= should_invalidate_connection(rpc_error) + if should_invalidate: + invalidate_connection() + error_code = rpc_error.code() # type: ignore + error_details = str(rpc_error) + + if error_code == grpc.StatusCode.CANCELLED: + self._logger.info(f"Disconnected from {self._host_address}") + break + elif error_code == grpc.StatusCode.UNAVAILABLE: + # Check if this is a connection timeout scenario + if "Timeout occurred" in error_details or "Failed to connect to remote host" in error_details: + self._logger.warning( + f"Connection timeout to {self._host_address}: {error_details} - will retry with fresh connection" + ) + else: + self._logger.warning( + f"The sidecar at address {self._host_address} is unavailable: {error_details} - will continue retrying" + ) + elif should_invalidate: + self._logger.warning( + f"Connection-level gRPC error ({error_code}): {rpc_error} - resetting connection" + ) + else: + self._logger.warning( + f"Application-level gRPC error ({error_code}): {rpc_error}" + ) + self._shutdown.wait(1) + except Exception as ex: + invalidate_connection() + self._logger.warning(f"Unexpected error: {ex}") + self._shutdown.wait(1) + invalidate_connection() + self._logger.info("No longer listening for work items") + self._async_worker_manager.shutdown() + await worker_task + def stop(self): """Stops the worker and waits for any pending work items to complete.""" if not self._is_running: @@ -195,51 +494,80 @@ def stop(self): self._response_stream.cancel() if self._runLoop is not None: self._runLoop.join(timeout=30) + self._async_worker_manager.shutdown() self._logger.info("Worker shutdown completed") self._is_running = False - def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken): + def _execute_orchestrator( + self, + req: pb.OrchestratorRequest, + stub: stubs.TaskHubSidecarServiceStub, + completionToken, + ): try: executor = _OrchestrationExecutor(self._registry, self._logger) result = 
executor.execute(req.instanceId, req.pastEvents, req.newEvents) res = pb.OrchestratorResponse( instanceId=req.instanceId, actions=result.actions, - customStatus=pbh.get_string_value(result.encoded_custom_status), - completionToken=completionToken) + customStatus=ph.get_string_value(result.encoded_custom_status), + completionToken=completionToken, + ) except Exception as ex: - self._logger.exception(f"An error occurred while trying to execute instance '{req.instanceId}': {ex}") - failure_details = pbh.new_failure_details(ex) - actions = [pbh.new_complete_orchestration_action(-1, pb.ORCHESTRATION_STATUS_FAILED, "", failure_details)] - res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=actions, completionToken=completionToken) + self._logger.exception( + f"An error occurred while trying to execute instance '{req.instanceId}': {ex}" + ) + failure_details = ph.new_failure_details(ex) + actions = [ + ph.new_complete_orchestration_action( + -1, pb.ORCHESTRATION_STATUS_FAILED, "", failure_details + ) + ] + res = pb.OrchestratorResponse( + instanceId=req.instanceId, + actions=actions, + completionToken=completionToken, + ) try: stub.CompleteOrchestratorTask(res) except Exception as ex: - self._logger.exception(f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}") - - def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken): + self._logger.exception( + f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}" + ) + + def _execute_activity( + self, + req: pb.ActivityRequest, + stub: stubs.TaskHubSidecarServiceStub, + completionToken, + ): instance_id = req.orchestrationInstance.instanceId try: executor = _ActivityExecutor(self._registry, self._logger) - result = executor.execute(instance_id, req.name, req.taskId, req.input.value) + result = executor.execute( + instance_id, req.name, req.taskId, req.input.value + ) res = pb.ActivityResponse( 
instanceId=instance_id, taskId=req.taskId, - result=pbh.get_string_value(result), - completionToken=completionToken) + result=ph.get_string_value(result), + completionToken=completionToken, + ) except Exception as ex: res = pb.ActivityResponse( instanceId=instance_id, taskId=req.taskId, - failureDetails=pbh.new_failure_details(ex), - completionToken=completionToken) + failureDetails=ph.new_failure_details(ex), + completionToken=completionToken, + ) try: stub.CompleteActivityTask(res) except Exception as ex: self._logger.exception( - f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}") + f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}" + ) class _RuntimeOrchestrationContext(task.OrchestrationContext): @@ -273,7 +601,9 @@ def run(self, generator: Generator[task.Task, Any, Any]): def resume(self): if self._generator is None: # This is never expected unless maybe there's an issue with the history - raise TypeError("The orchestrator generator is not initialized! Was the orchestration history corrupted?") + raise TypeError( + "The orchestrator generator is not initialized! Was the orchestration history corrupted?" + ) # We can resume the generator only if the previously yielded task # has reached a completed state. 
The only time this won't be the @@ -294,7 +624,12 @@ def resume(self): raise TypeError("The orchestrator generator yielded a non-Task object") self._previous_task = next_task - def set_complete(self, result: Any, status: pb.OrchestrationStatus, is_result_encoded: bool = False): + def set_complete( + self, + result: Any, + status: pb.OrchestrationStatus, + is_result_encoded: bool = False, + ): if self._is_complete: return @@ -307,7 +642,8 @@ def set_complete(self, result: Any, status: pb.OrchestrationStatus, is_result_en if result is not None: result_json = result if is_result_encoded else shared.to_json(result) action = ph.new_complete_orchestration_action( - self.next_sequence_number(), status, result_json) + self.next_sequence_number(), status, result_json + ) self._pending_actions[action.id] = action def set_failed(self, ex: Exception): @@ -319,7 +655,10 @@ def set_failed(self, ex: Exception): self._completion_status = pb.ORCHESTRATION_STATUS_FAILED action = ph.new_complete_orchestration_action( - self.next_sequence_number(), pb.ORCHESTRATION_STATUS_FAILED, None, ph.new_failure_details(ex) + self.next_sequence_number(), + pb.ORCHESTRATION_STATUS_FAILED, + None, + ph.new_failure_details(ex), ) self._pending_actions[action.id] = action @@ -343,14 +682,21 @@ def get_actions(self) -> list[pb.OrchestratorAction]: # replayed when the new instance starts. 
for event_name, values in self._received_events.items(): for event_value in values: - encoded_value = shared.to_json(event_value) if event_value else None - carryover_events.append(ph.new_event_raised_event(event_name, encoded_value)) + encoded_value = ( + shared.to_json(event_value) if event_value else None + ) + carryover_events.append( + ph.new_event_raised_event(event_name, encoded_value) + ) action = ph.new_complete_orchestration_action( self.next_sequence_number(), pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW, - result=shared.to_json(self._new_input) if self._new_input is not None else None, + result=shared.to_json(self._new_input) + if self._new_input is not None + else None, failure_details=None, - carryover_events=carryover_events) + carryover_events=carryover_events, + ) return [action] else: return list(self._pending_actions.values()) @@ -367,60 +713,84 @@ def instance_id(self) -> str: def current_utc_datetime(self) -> datetime: return self._current_utc_datetime - @property - def is_replaying(self) -> bool: - return self._is_replaying - @current_utc_datetime.setter def current_utc_datetime(self, value: datetime): self._current_utc_datetime = value + @property + def is_replaying(self) -> bool: + return self._is_replaying + def set_custom_status(self, custom_status: Any) -> None: - self._encoded_custom_status = shared.to_json(custom_status) if custom_status is not None else None + self._encoded_custom_status = ( + shared.to_json(custom_status) if custom_status is not None else None + ) def create_timer(self, fire_at: Union[datetime, timedelta]) -> task.Task: return self.create_timer_internal(fire_at) - def create_timer_internal(self, fire_at: Union[datetime, timedelta], - retryable_task: Optional[task.RetryableTask] = None) -> task.Task: + def create_timer_internal( + self, + fire_at: Union[datetime, timedelta], + retryable_task: Optional[task.RetryableTask] = None, + ) -> task.Task: id = self.next_sequence_number() if isinstance(fire_at, timedelta): fire_at = 
self.current_utc_datetime + fire_at action = ph.new_create_timer_action(id, fire_at) self._pending_actions[id] = action - timer_task = task.TimerTask() + timer_task: task.TimerTask = task.TimerTask() if retryable_task is not None: timer_task.set_retryable_parent(retryable_task) self._pending_tasks[id] = timer_task return timer_task - def call_activity(self, activity: Union[task.Activity[TInput, TOutput], str], *, - input: Optional[TInput] = None, - retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]: + def call_activity( + self, + activity: Union[task.Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[task.RetryPolicy] = None, + ) -> task.Task[TOutput]: id = self.next_sequence_number() - self.call_activity_function_helper(id, activity, input=input, retry_policy=retry_policy, - is_sub_orch=False) + self.call_activity_function_helper( + id, activity, input=input, retry_policy=retry_policy, is_sub_orch=False + ) return self._pending_tasks.get(id, task.CompletableTask()) - def call_sub_orchestrator(self, orchestrator: task.Orchestrator[TInput, TOutput], *, - input: Optional[TInput] = None, - instance_id: Optional[str] = None, - retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]: + def call_sub_orchestrator( + self, + orchestrator: task.Orchestrator[TInput, TOutput], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + retry_policy: Optional[task.RetryPolicy] = None, + ) -> task.Task[TOutput]: id = self.next_sequence_number() orchestrator_name = task.get_name(orchestrator) - self.call_activity_function_helper(id, orchestrator_name, input=input, retry_policy=retry_policy, - is_sub_orch=True, instance_id=instance_id) + self.call_activity_function_helper( + id, + orchestrator_name, + input=input, + retry_policy=retry_policy, + is_sub_orch=True, + instance_id=instance_id, + ) return self._pending_tasks.get(id, task.CompletableTask()) - def 
call_activity_function_helper(self, id: Optional[int], - activity_function: Union[task.Activity[TInput, TOutput], str], *, - input: Optional[TInput] = None, - retry_policy: Optional[task.RetryPolicy] = None, - is_sub_orch: bool = False, - instance_id: Optional[str] = None, - fn_task: Optional[task.CompletableTask[TOutput]] = None): + def call_activity_function_helper( + self, + id: Optional[int], + activity_function: Union[task.Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[task.RetryPolicy] = None, + is_sub_orch: bool = False, + instance_id: Optional[str] = None, + fn_task: Optional[task.CompletableTask[TOutput]] = None, + ): if id is None: id = self.next_sequence_number() @@ -431,7 +801,11 @@ def call_activity_function_helper(self, id: Optional[int], # We just need to take string representation of it. encoded_input = str(input) if not is_sub_orch: - name = activity_function if isinstance(activity_function, str) else task.get_name(activity_function) + name = ( + activity_function + if isinstance(activity_function, str) + else task.get_name(activity_function) + ) action = ph.new_schedule_task_action(id, name, encoded_input) else: if instance_id is None: @@ -439,16 +813,21 @@ def call_activity_function_helper(self, id: Optional[int], instance_id = f"{self.instance_id}:{id:04x}" if not isinstance(activity_function, str): raise ValueError("Orchestrator function name must be a string") - action = ph.new_create_sub_orchestration_action(id, activity_function, instance_id, encoded_input) + action = ph.new_create_sub_orchestration_action( + id, activity_function, instance_id, encoded_input + ) self._pending_actions[id] = action if fn_task is None: if retry_policy is None: fn_task = task.CompletableTask[TOutput]() else: - fn_task = task.RetryableTask[TOutput](retry_policy=retry_policy, action=action, - start_time=self.current_utc_datetime, - is_sub_orch=is_sub_orch) + fn_task = task.RetryableTask[TOutput]( + 
retry_policy=retry_policy, + action=action, + start_time=self.current_utc_datetime, + is_sub_orch=is_sub_orch, + ) self._pending_tasks[id] = fn_task def wait_for_external_event(self, name: str) -> task.Task: @@ -457,7 +836,7 @@ def wait_for_external_event(self, name: str) -> task.Task: # event with the given name so that we can resume the generator when it # arrives. If there are multiple events with the same name, we return # them in the order they were received. - external_event_task = task.CompletableTask() + external_event_task: task.CompletableTask = task.CompletableTask() event_name = name.casefold() event_list = self._received_events.get(event_name, None) if event_list: @@ -484,7 +863,9 @@ class ExecutionResults: actions: list[pb.OrchestratorAction] encoded_custom_status: Optional[str] - def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str]): + def __init__( + self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str] + ): self.actions = actions self.encoded_custom_status = encoded_custom_status @@ -498,14 +879,23 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._is_suspended = False self._suspended_events: list[pb.HistoryEvent] = [] - def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_events: Sequence[pb.HistoryEvent]) -> ExecutionResults: + def execute( + self, + instance_id: str, + old_events: Sequence[pb.HistoryEvent], + new_events: Sequence[pb.HistoryEvent], + ) -> ExecutionResults: if not new_events: - raise task.OrchestrationStateError("The new history event list must have at least one event in it.") + raise task.OrchestrationStateError( + "The new history event list must have at least one event in it." 
+ ) ctx = _RuntimeOrchestrationContext(instance_id) try: # Rebuild local state by replaying old history into the orchestrator function - self._logger.debug(f"{instance_id}: Rebuilding local state with {len(old_events)} history event...") + self._logger.debug( + f"{instance_id}: Rebuilding local state with {len(old_events)} history event..." + ) ctx._is_replaying = True for old_event in old_events: self.process_event(ctx, old_event) @@ -513,7 +903,9 @@ def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_e # Get new actions by executing newly received events into the orchestrator function if self._logger.level <= logging.DEBUG: summary = _get_new_event_summary(new_events) - self._logger.debug(f"{instance_id}: Processing {len(new_events)} new event(s): {summary}") + self._logger.debug( + f"{instance_id}: Processing {len(new_events)} new event(s): {summary}" + ) ctx._is_replaying = False for new_event in new_events: self.process_event(ctx, new_event) @@ -525,17 +917,31 @@ def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_e if not ctx._is_complete: task_count = len(ctx._pending_tasks) event_count = len(ctx._pending_events) - self._logger.info(f"{instance_id}: Orchestrator yielded with {task_count} task(s) and {event_count} event(s) outstanding.") - elif ctx._completion_status and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW: - completion_status_str = pbh.get_orchestration_status_str(ctx._completion_status) - self._logger.info(f"{instance_id}: Orchestration completed with status: {completion_status_str}") + self._logger.info( + f"{instance_id}: Orchestrator yielded with {task_count} task(s) and {event_count} event(s) outstanding." 
+ ) + elif ( + ctx._completion_status and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW + ): + completion_status_str = ph.get_orchestration_status_str( + ctx._completion_status + ) + self._logger.info( + f"{instance_id}: Orchestration completed with status: {completion_status_str}" + ) actions = ctx.get_actions() if self._logger.level <= logging.DEBUG: - self._logger.debug(f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}") - return ExecutionResults(actions=actions, encoded_custom_status=ctx._encoded_custom_status) + self._logger.debug( + f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}" + ) + return ExecutionResults( + actions=actions, encoded_custom_status=ctx._encoded_custom_status + ) - def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent) -> None: + def process_event( + self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent + ) -> None: if self._is_suspended and _is_suspendable(event): # We are suspended, so we need to buffer this event until we are resumed self._suspended_events.append(event) @@ -550,14 +956,19 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven fn = self._registry.get_orchestrator(event.executionStarted.name) if fn is None: raise OrchestratorNotRegisteredError( - f"A '{event.executionStarted.name}' orchestrator was not registered.") + f"A '{event.executionStarted.name}' orchestrator was not registered." 
+ ) # deserialize the input, if any input = None - if event.executionStarted.input is not None and event.executionStarted.input.value != "": + if ( + event.executionStarted.input is not None and event.executionStarted.input.value != "" + ): input = shared.from_json(event.executionStarted.input.value) - result = fn(ctx, input) # this does not execute the generator, only creates it + result = fn( + ctx, input + ) # this does not execute the generator, only creates it if isinstance(result, GeneratorType): # Start the orchestrator's generator function ctx.run(result) @@ -570,10 +981,14 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven timer_id = event.eventId action = ctx._pending_actions.pop(timer_id, None) if not action: - raise _get_non_determinism_error(timer_id, task.get_name(ctx.create_timer)) + raise _get_non_determinism_error( + timer_id, task.get_name(ctx.create_timer) + ) elif not action.HasField("createTimer"): expected_method_name = task.get_name(ctx.create_timer) - raise _get_wrong_action_type_error(timer_id, expected_method_name, action) + raise _get_wrong_action_type_error( + timer_id, expected_method_name, action + ) elif event.HasField("timerFired"): timer_id = event.timerFired.timerId timer_task = ctx._pending_tasks.pop(timer_id, None) @@ -581,7 +996,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx._is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected timerFired event with ID = {timer_id}.") + f"{ctx.instance_id}: Ignoring unexpected timerFired event with ID = {timer_id}." 
+ ) return timer_task.complete(None) if timer_task._retryable_parent is not None: @@ -593,12 +1009,15 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven else: cur_task = activity_action.createSubOrchestration instance_id = cur_task.instanceId - ctx.call_activity_function_helper(id=activity_action.id, activity_function=cur_task.name, - input=cur_task.input.value, - retry_policy=timer_task._retryable_parent._retry_policy, - is_sub_orch=timer_task._retryable_parent._is_sub_orch, - instance_id=instance_id, - fn_task=timer_task._retryable_parent) + ctx.call_activity_function_helper( + id=activity_action.id, + activity_function=cur_task.name, + input=cur_task.input.value, + retry_policy=timer_task._retryable_parent._retry_policy, + is_sub_orch=timer_task._retryable_parent._is_sub_orch, + instance_id=instance_id, + fn_task=timer_task._retryable_parent, + ) else: ctx.resume() elif event.HasField("taskScheduled"): @@ -608,16 +1027,21 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven action = ctx._pending_actions.pop(task_id, None) activity_task = ctx._pending_tasks.get(task_id, None) if not action: - raise _get_non_determinism_error(task_id, task.get_name(ctx.call_activity)) + raise _get_non_determinism_error( + task_id, task.get_name(ctx.call_activity) + ) elif not action.HasField("scheduleTask"): expected_method_name = task.get_name(ctx.call_activity) - raise _get_wrong_action_type_error(task_id, expected_method_name, action) + raise _get_wrong_action_type_error( + task_id, expected_method_name, action + ) elif action.scheduleTask.name != event.taskScheduled.name: raise _get_wrong_action_name_error( task_id, method_name=task.get_name(ctx.call_activity), expected_task_name=event.taskScheduled.name, - actual_task_name=action.scheduleTask.name) + actual_task_name=action.scheduleTask.name, + ) elif event.HasField("taskCompleted"): # This history event contains the result of a completed activity task. 
task_id = event.taskCompleted.taskScheduledId @@ -626,7 +1050,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected taskCompleted event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected taskCompleted event with ID = {task_id}." + ) return result = None if not ph.is_empty(event.taskCompleted.result): @@ -640,7 +1065,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected taskFailed event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected taskFailed event with ID = {task_id}." + ) return if isinstance(activity_task, task.RetryableTask): @@ -649,7 +1075,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven if next_delay is None: activity_task.fail( f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", - event.taskFailed.failureDetails) + event.taskFailed.failureDetails, + ) ctx.resume() else: activity_task.increment_attempt_count() @@ -657,7 +1084,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven elif isinstance(activity_task, task.CompletableTask): activity_task.fail( f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", - event.taskFailed.failureDetails) + event.taskFailed.failureDetails, + ) ctx.resume() else: raise TypeError("Unexpected task type") @@ -667,16 +1095,23 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven task_id = event.eventId action = ctx._pending_actions.pop(task_id, None) if not action: - raise _get_non_determinism_error(task_id, 
task.get_name(ctx.call_sub_orchestrator)) + raise _get_non_determinism_error( + task_id, task.get_name(ctx.call_sub_orchestrator) + ) elif not action.HasField("createSubOrchestration"): expected_method_name = task.get_name(ctx.call_sub_orchestrator) - raise _get_wrong_action_type_error(task_id, expected_method_name, action) - elif action.createSubOrchestration.name != event.subOrchestrationInstanceCreated.name: + raise _get_wrong_action_type_error( + task_id, expected_method_name, action + ) + elif ( + action.createSubOrchestration.name != event.subOrchestrationInstanceCreated.name + ): raise _get_wrong_action_name_error( task_id, method_name=task.get_name(ctx.call_sub_orchestrator), expected_task_name=event.subOrchestrationInstanceCreated.name, - actual_task_name=action.createSubOrchestration.name) + actual_task_name=action.createSubOrchestration.name, + ) elif event.HasField("subOrchestrationInstanceCompleted"): task_id = event.subOrchestrationInstanceCompleted.taskScheduledId sub_orch_task = ctx._pending_tasks.pop(task_id, None) @@ -684,11 +1119,14 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceCompleted event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceCompleted event with ID = {task_id}." + ) return result = None if not ph.is_empty(event.subOrchestrationInstanceCompleted.result): - result = shared.from_json(event.subOrchestrationInstanceCompleted.result.value) + result = shared.from_json( + event.subOrchestrationInstanceCompleted.result.value + ) sub_orch_task.complete(result) ctx.resume() elif event.HasField("subOrchestrationInstanceFailed"): @@ -699,7 +1137,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? 
if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceFailed event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceFailed event with ID = {task_id}." + ) return if isinstance(sub_orch_task, task.RetryableTask): if sub_orch_task._retry_policy is not None: @@ -707,7 +1146,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven if next_delay is None: sub_orch_task.fail( f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", - failedEvent.failureDetails) + failedEvent.failureDetails, + ) ctx.resume() else: sub_orch_task.increment_attempt_count() @@ -715,7 +1155,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven elif isinstance(sub_orch_task, task.CompletableTask): sub_orch_task.fail( f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", - failedEvent.failureDetails) + failedEvent.failureDetails, + ) ctx.resume() else: raise TypeError("Unexpected sub-orchestration task type") @@ -744,7 +1185,9 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven decoded_result = shared.from_json(event.eventRaised.input.value) event_list.append(decoded_result) if not ctx.is_replaying: - self._logger.info(f"{ctx.instance_id}: Event '{event_name}' has been buffered as there are no tasks waiting for it.") + self._logger.info( + f"{ctx.instance_id}: Event '{event_name}' has been buffered as there are no tasks waiting for it." 
+ ) elif event.HasField("executionSuspended"): if not self._is_suspended and not ctx.is_replaying: self._logger.info(f"{ctx.instance_id}: Execution suspended.") @@ -759,11 +1202,21 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven elif event.HasField("executionTerminated"): if not ctx.is_replaying: self._logger.info(f"{ctx.instance_id}: Execution terminating.") - encoded_output = event.executionTerminated.input.value if not ph.is_empty(event.executionTerminated.input) else None - ctx.set_complete(encoded_output, pb.ORCHESTRATION_STATUS_TERMINATED, is_result_encoded=True) + encoded_output = ( + event.executionTerminated.input.value + if not ph.is_empty(event.executionTerminated.input) + else None + ) + ctx.set_complete( + encoded_output, + pb.ORCHESTRATION_STATUS_TERMINATED, + is_result_encoded=True, + ) else: eventType = event.WhichOneof("eventType") - raise task.OrchestrationStateError(f"Don't know how to handle event of type '{eventType}'") + raise task.OrchestrationStateError( + f"Don't know how to handle event of type '{eventType}'" + ) except StopIteration as generatorStopped: # The orchestrator generator function completed ctx.set_complete(generatorStopped.value, pb.ORCHESTRATION_STATUS_COMPLETED) @@ -774,12 +1227,22 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._registry = registry self._logger = logger - def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input: Optional[str]) -> Optional[str]: + def execute( + self, + orchestration_id: str, + name: str, + task_id: int, + encoded_input: Optional[str], + ) -> Optional[str]: """Executes an activity function and returns the serialized result, if any.""" - self._logger.debug(f"{orchestration_id}/{task_id}: Executing activity '{name}'...") + self._logger.debug( + f"{orchestration_id}/{task_id}: Executing activity '{name}'..." 
+ ) fn = self._registry.get_activity(name) if not fn: - raise ActivityNotRegisteredError(f"Activity function named '{name}' was not registered!") + raise ActivityNotRegisteredError( + f"Activity function named '{name}' was not registered!" + ) activity_input = shared.from_json(encoded_input) if encoded_input else None ctx = task.ActivityContext(orchestration_id, task_id) @@ -787,49 +1250,54 @@ def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input: # Execute the activity function activity_output = fn(ctx, activity_input) - encoded_output = shared.to_json(activity_output) if activity_output is not None else None + encoded_output = ( + shared.to_json(activity_output) if activity_output is not None else None + ) chars = len(encoded_output) if encoded_output else 0 self._logger.debug( - f"{orchestration_id}/{task_id}: Activity '{name}' completed successfully with {chars} char(s) of encoded output.") + f"{orchestration_id}/{task_id}: Activity '{name}' completed successfully with {chars} char(s) of encoded output." + ) return encoded_output -def _get_non_determinism_error(task_id: int, action_name: str) -> task.NonDeterminismError: +def _get_non_determinism_error( + task_id: int, action_name: str +) -> task.NonDeterminismError: return task.NonDeterminismError( f"A previous execution called {action_name} with ID={task_id}, but the current " f"execution doesn't have this action with this ID. This problem occurs when either " f"the orchestration has non-deterministic logic or if the code was changed after an " - f"instance of this orchestration already started running.") + f"instance of this orchestration already started running." 
+ ) def _get_wrong_action_type_error( - task_id: int, - expected_method_name: str, - action: pb.OrchestratorAction) -> task.NonDeterminismError: + task_id: int, expected_method_name: str, action: pb.OrchestratorAction +) -> task.NonDeterminismError: unexpected_method_name = _get_method_name_for_action(action) return task.NonDeterminismError( f"Failed to restore orchestration state due to a history mismatch: A previous execution called " f"{expected_method_name} with ID={task_id}, but the current execution is instead trying to call " f"{unexpected_method_name} as part of rebuilding it's history. This kind of mismatch can happen if an " f"orchestration has non-deterministic logic or if the code was changed after an instance of this " - f"orchestration already started running.") + f"orchestration already started running." + ) def _get_wrong_action_name_error( - task_id: int, - method_name: str, - expected_task_name: str, - actual_task_name: str) -> task.NonDeterminismError: + task_id: int, method_name: str, expected_task_name: str, actual_task_name: str +) -> task.NonDeterminismError: return task.NonDeterminismError( f"Failed to restore orchestration state due to a history mismatch: A previous execution called " f"{method_name} with name='{expected_task_name}' and sequence number {task_id}, but the current " f"execution is instead trying to call {actual_task_name} as part of rebuilding it's history. " f"This kind of mismatch can happen if an orchestration has non-deterministic logic or if the code " - f"was changed after an instance of this orchestration already started running.") + f"was changed after an instance of this orchestration already started running." 
+ ) def _get_method_name_for_action(action: pb.OrchestratorAction) -> str: - action_type = action.WhichOneof('orchestratorActionType') + action_type = action.WhichOneof("orchestratorActionType") if action_type == "scheduleTask": return task.get_name(task.OrchestrationContext.call_activity) elif action_type == "createTimer": @@ -851,7 +1319,7 @@ def _get_new_event_summary(new_events: Sequence[pb.HistoryEvent]) -> str: else: counts: dict[str, int] = {} for event in new_events: - event_type = event.WhichOneof('eventType') + event_type = event.WhichOneof("eventType") counts[event_type] = counts.get(event_type, 0) + 1 return f"[{', '.join(f'{name}={count}' for name, count in counts.items())}]" @@ -865,11 +1333,210 @@ def _get_action_summary(new_actions: Sequence[pb.OrchestratorAction]) -> str: else: counts: dict[str, int] = {} for action in new_actions: - action_type = action.WhichOneof('orchestratorActionType') + action_type = action.WhichOneof("orchestratorActionType") counts[action_type] = counts.get(action_type, 0) + 1 return f"[{', '.join(f'{name}={count}' for name, count in counts.items())}]" def _is_suspendable(event: pb.HistoryEvent) -> bool: """Returns true if the event is one that can be suspended and resumed.""" - return event.WhichOneof("eventType") not in ["executionResumed", "executionTerminated"] + return event.WhichOneof("eventType") not in [ + "executionResumed", + "executionTerminated", + ] + + +class _AsyncWorkerManager: + def __init__(self, concurrency_options: ConcurrencyOptions): + self.concurrency_options = concurrency_options + self.activity_semaphore = None + self.orchestration_semaphore = None + # Don't create queues here - defer until we have an event loop + self.activity_queue: Optional[asyncio.Queue] = None + self.orchestration_queue: Optional[asyncio.Queue] = None + self._queue_event_loop: Optional[asyncio.AbstractEventLoop] = None + # Store work items when no event loop is available + self._pending_activity_work: list = [] + 
self._pending_orchestration_work: list = [] + self.thread_pool = ThreadPoolExecutor( + max_workers=concurrency_options.maximum_thread_pool_workers, + thread_name_prefix="DurableTask", + ) + self._shutdown = False + + def _ensure_queues_for_current_loop(self): + """Ensure queues are bound to the current event loop.""" + try: + current_loop = asyncio.get_running_loop() + except RuntimeError: + # No event loop running, can't create queues + return + + # Check if queues are already properly set up for current loop + if self._queue_event_loop is current_loop: + if self.activity_queue is not None and self.orchestration_queue is not None: + # Queues are already bound to the current loop and exist + return + + # Need to recreate queues for the current event loop + # First, preserve any existing work items + existing_activity_items = [] + existing_orchestration_items = [] + + if self.activity_queue is not None: + try: + while not self.activity_queue.empty(): + existing_activity_items.append(self.activity_queue.get_nowait()) + except Exception: + pass + + if self.orchestration_queue is not None: + try: + while not self.orchestration_queue.empty(): + existing_orchestration_items.append( + self.orchestration_queue.get_nowait() + ) + except Exception: + pass + + # Create fresh queues for the current event loop + self.activity_queue = asyncio.Queue() + self.orchestration_queue = asyncio.Queue() + self._queue_event_loop = current_loop + + # Restore the work items to the new queues + for item in existing_activity_items: + self.activity_queue.put_nowait(item) + for item in existing_orchestration_items: + self.orchestration_queue.put_nowait(item) + + # Move pending work items to the queues + for item in self._pending_activity_work: + self.activity_queue.put_nowait(item) + for item in self._pending_orchestration_work: + self.orchestration_queue.put_nowait(item) + + # Clear the pending work lists + self._pending_activity_work.clear() + self._pending_orchestration_work.clear() + + 
async def run(self): + # Reset shutdown flag in case this manager is being reused + self._shutdown = False + + # Ensure queues are properly bound to the current event loop + self._ensure_queues_for_current_loop() + + # Create semaphores in the current event loop + self.activity_semaphore = asyncio.Semaphore( + self.concurrency_options.maximum_concurrent_activity_work_items + ) + self.orchestration_semaphore = asyncio.Semaphore( + self.concurrency_options.maximum_concurrent_orchestration_work_items + ) + + # Start background consumers for each work type + if self.activity_queue is not None and self.orchestration_queue is not None: + await asyncio.gather( + self._consume_queue(self.activity_queue, self.activity_semaphore), + self._consume_queue( + self.orchestration_queue, self.orchestration_semaphore + ), + ) + + async def _consume_queue(self, queue: asyncio.Queue, semaphore: asyncio.Semaphore): + # List to track running tasks + running_tasks: set[asyncio.Task] = set() + + while True: + # Clean up completed tasks + done_tasks = {task for task in running_tasks if task.done()} + running_tasks -= done_tasks + + # Exit if shutdown is set and the queue is empty and no tasks are running + if self._shutdown and queue.empty() and not running_tasks: + break + + try: + work = await asyncio.wait_for(queue.get(), timeout=1.0) + except asyncio.TimeoutError: + continue + + func, args, kwargs = work + # Create a concurrent task for processing + task = asyncio.create_task( + self._process_work_item(semaphore, queue, func, args, kwargs) + ) + running_tasks.add(task) + + async def _process_work_item( + self, semaphore: asyncio.Semaphore, queue: asyncio.Queue, func, args, kwargs + ): + async with semaphore: + try: + await self._run_func(func, *args, **kwargs) + finally: + queue.task_done() + + async def _run_func(self, func, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return await func(*args, **kwargs) + else: + loop = asyncio.get_running_loop() + # Avoid submitting to 
executor after shutdown + if ( + getattr(self, "_shutdown", False) and getattr(self, "thread_pool", None) and getattr( + self.thread_pool, "_shutdown", False) + ): + return None + return await loop.run_in_executor( + self.thread_pool, lambda: func(*args, **kwargs) + ) + + def submit_activity(self, func, *args, **kwargs): + work_item = (func, args, kwargs) + self._ensure_queues_for_current_loop() + if self.activity_queue is not None: + self.activity_queue.put_nowait(work_item) + else: + # No event loop running, store in pending list + self._pending_activity_work.append(work_item) + + def submit_orchestration(self, func, *args, **kwargs): + work_item = (func, args, kwargs) + self._ensure_queues_for_current_loop() + if self.orchestration_queue is not None: + self.orchestration_queue.put_nowait(work_item) + else: + # No event loop running, store in pending list + self._pending_orchestration_work.append(work_item) + + def shutdown(self): + self._shutdown = True + self.thread_pool.shutdown(wait=True) + + def reset_for_new_run(self): + """Reset the manager state for a new run.""" + self._shutdown = False + # Clear any existing queues - they'll be recreated when needed + if self.activity_queue is not None: + # Clear existing queue by creating a new one + # This ensures no items from previous runs remain + try: + while not self.activity_queue.empty(): + self.activity_queue.get_nowait() + except Exception: + pass + if self.orchestration_queue is not None: + try: + while not self.orchestration_queue.empty(): + self.orchestration_queue.get_nowait() + except Exception: + pass + # Clear pending work lists + self._pending_activity_work.clear() + self._pending_orchestration_work.clear() + + +# Export public API +__all__ = ["ConcurrencyOptions", "TaskHubGrpcWorker"] diff --git a/examples/README.md b/examples/README.md index 7cfbc7a..404b127 100644 --- a/examples/README.md +++ b/examples/README.md @@ -24,4 +24,4 @@ In some cases, the sample may require command-line parameters or 
user inputs. In - [Activity sequence](./activity_sequence.py): Orchestration that schedules three activity calls in a sequence. - [Fan-out/fan-in](./fanout_fanin.py): Orchestration that schedules a dynamic number of activity calls in parallel, waits for all of them to complete, and then performs an aggregation on the results. -- [Human interaction](./human_interaction.py): Orchestration that waits for a human to approve an order before continuing. \ No newline at end of file +- [Human interaction](./human_interaction.py): Orchestration that waits for a human to approve an order before continuing. diff --git a/pyproject.toml b/pyproject.toml index 60a9d37..1491988 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask" -version = "0.2.1" +version = "0.3.0" description = "A Durable Task Client SDK for Python" keywords = [ "durable", diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py index 64bbec8..e750134 100644 --- a/tests/durabletask/test_client.py +++ b/tests/durabletask/test_client.py @@ -1,13 +1,14 @@ -from unittest.mock import patch, ANY +from unittest.mock import ANY, patch +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl from durabletask.internal.shared import (get_default_host_address, get_grpc_channel) -from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl HOST_ADDRESS = 'localhost:50051' METADATA = [('key1', 'value1'), ('key2', 'value2')] INTERCEPTORS = [DefaultClientInterceptorImpl(METADATA)] + def test_get_grpc_channel_insecure(): with patch('grpc.insecure_channel') as mock_channel: get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) @@ -85,4 +86,4 @@ def test_grpc_channel_with_host_name_protocol_stripping(): prefix = "" get_grpc_channel(prefix + host_name, True, interceptors=INTERCEPTORS) - mock_secure_channel.assert_called_with(host_name, ANY) \ No newline at end of file + 
mock_secure_channel.assert_called_with(host_name, ANY) diff --git a/tests/durabletask/test_concurrency_options.py b/tests/durabletask/test_concurrency_options.py new file mode 100644 index 0000000..b49b7ec --- /dev/null +++ b/tests/durabletask/test_concurrency_options.py @@ -0,0 +1,96 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import os + +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker + + +def test_default_concurrency_options(): + """Test that default concurrency options work correctly.""" + options = ConcurrencyOptions() + processor_count = os.cpu_count() or 1 + expected_default = 100 * processor_count + expected_workers = processor_count + 4 + + assert options.maximum_concurrent_activity_work_items == expected_default + assert options.maximum_concurrent_orchestration_work_items == expected_default + assert options.maximum_thread_pool_workers == expected_workers + + +def test_custom_concurrency_options(): + """Test that custom concurrency options work correctly.""" + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=50, + maximum_concurrent_orchestration_work_items=25, + maximum_thread_pool_workers=30, + ) + + assert options.maximum_concurrent_activity_work_items == 50 + assert options.maximum_concurrent_orchestration_work_items == 25 + assert options.maximum_thread_pool_workers == 30 + + +def test_partial_custom_options(): + """Test that partially specified options use defaults for unspecified values.""" + processor_count = os.cpu_count() or 1 + expected_default = 100 * processor_count + expected_workers = processor_count + 4 + + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=30 + ) + + assert options.maximum_concurrent_activity_work_items == 30 + assert options.maximum_concurrent_orchestration_work_items == expected_default + assert options.maximum_thread_pool_workers == expected_workers + + +def test_worker_with_concurrency_options(): + """Test that 
TaskHubGrpcWorker accepts concurrency options.""" + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=10, + maximum_concurrent_orchestration_work_items=20, + maximum_thread_pool_workers=15, + ) + + worker = TaskHubGrpcWorker(concurrency_options=options) + + assert worker.concurrency_options == options + + +def test_worker_default_options(): + """Test that TaskHubGrpcWorker uses default options when no parameters are provided.""" + worker = TaskHubGrpcWorker() + + processor_count = os.cpu_count() or 1 + expected_default = 100 * processor_count + expected_workers = processor_count + 4 + + assert ( + worker.concurrency_options.maximum_concurrent_activity_work_items == expected_default + ) + assert ( + worker.concurrency_options.maximum_concurrent_orchestration_work_items == expected_default + ) + assert worker.concurrency_options.maximum_thread_pool_workers == expected_workers + + +def test_concurrency_options_property_access(): + """Test that the concurrency_options property works correctly.""" + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=15, + maximum_concurrent_orchestration_work_items=25, + maximum_thread_pool_workers=30, + ) + + worker = TaskHubGrpcWorker(concurrency_options=options) + retrieved_options = worker.concurrency_options + + # Should be the same object + assert retrieved_options is options + + # Should have correct values + assert retrieved_options.maximum_concurrent_activity_work_items == 15 + assert retrieved_options.maximum_concurrent_orchestration_work_items == 25 + assert retrieved_options.maximum_thread_pool_workers == 30 diff --git a/tests/durabletask/test_worker_concurrency_loop.py b/tests/durabletask/test_worker_concurrency_loop.py new file mode 100644 index 0000000..de6753b --- /dev/null +++ b/tests/durabletask/test_worker_concurrency_loop.py @@ -0,0 +1,140 @@ +import asyncio +import threading +import time + +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker + + +class 
DummyStub: + def __init__(self): + self.completed = [] + + def CompleteOrchestratorTask(self, res): + self.completed.append(('orchestrator', res)) + + def CompleteActivityTask(self, res): + self.completed.append(('activity', res)) + + +class DummyRequest: + def __init__(self, kind, instance_id): + self.kind = kind + self.instanceId = instance_id + self.orchestrationInstance = type('O', (), {'instanceId': instance_id}) + self.name = 'dummy' + self.taskId = 1 + self.input = type('I', (), {'value': ''}) + self.pastEvents = [] + self.newEvents = [] + + def HasField(self, field): + return (field == 'orchestratorRequest' and self.kind == 'orchestrator') or \ + (field == 'activityRequest' and self.kind == 'activity') + + def WhichOneof(self, _): + return f'{self.kind}Request' + + +class DummyCompletionToken: + pass + + +def test_worker_concurrency_loop_sync(): + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=2, + maximum_concurrent_orchestration_work_items=1, + maximum_thread_pool_workers=2, + ) + worker = TaskHubGrpcWorker(concurrency_options=options) + stub = DummyStub() + + def dummy_orchestrator(req, stub, completionToken): + time.sleep(0.1) + stub.CompleteOrchestratorTask('ok') + + def dummy_activity(req, stub, completionToken): + time.sleep(0.1) + stub.CompleteActivityTask('ok') + + # Patch the worker's _execute_orchestrator and _execute_activity + worker._execute_orchestrator = dummy_orchestrator + worker._execute_activity = dummy_activity + + orchestrator_requests = [DummyRequest('orchestrator', f'orch{i}') for i in range(3)] + activity_requests = [DummyRequest('activity', f'act{i}') for i in range(4)] + + async def run_test(): + # Start the worker manager's run loop in the background + worker_task = asyncio.create_task(worker._async_worker_manager.run()) + for req in orchestrator_requests: + worker._async_worker_manager.submit_orchestration(dummy_orchestrator, req, stub, DummyCompletionToken()) + for req in activity_requests: + 
worker._async_worker_manager.submit_activity(dummy_activity, req, stub, DummyCompletionToken()) + await asyncio.sleep(1.0) + orchestrator_count = sum(1 for t, _ in stub.completed if t == 'orchestrator') + activity_count = sum(1 for t, _ in stub.completed if t == 'activity') + assert orchestrator_count == 3, f"Expected 3 orchestrator completions, got {orchestrator_count}" + assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" + worker._async_worker_manager._shutdown = True + await worker_task + asyncio.run(run_test()) + + +# Dummy orchestrator and activity for sync context +def dummy_orchestrator(ctx, input): + # Simulate some work + time.sleep(0.1) + return "orchestrator-done" + + +def dummy_activity(ctx, input): + # Simulate some work + time.sleep(0.1) + return "activity-done" + + +def test_worker_concurrency_sync(): + # Use small concurrency to make test observable + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=2, + maximum_concurrent_orchestration_work_items=2, + maximum_thread_pool_workers=2, + ) + worker = TaskHubGrpcWorker(concurrency_options=options) + worker.add_orchestrator(dummy_orchestrator) + worker.add_activity(dummy_activity) + + # Simulate submitting work items to the queues directly (bypassing gRPC) + # We'll use the internal _async_worker_manager for this test + manager = worker._async_worker_manager + results = [] + lock = threading.Lock() + + def make_work(kind, idx): + def fn(*args, **kwargs): + time.sleep(0.1) + with lock: + results.append((kind, idx)) + return f"{kind}-{idx}-done" + return fn + + # Submit more work than concurrency allows + for i in range(5): + manager.submit_orchestration(make_work("orch", i)) + manager.submit_activity(make_work("act", i)) + + # Run the manager loop in a thread (sync context) + def run_manager(): + asyncio.run(manager.run()) + + t = threading.Thread(target=run_manager) + t.start() + time.sleep(1.5) # Let work process + manager.shutdown() + # Unblock 
the consumers by putting dummy items in the queues + manager.activity_queue.put_nowait((lambda: None, (), {})) + manager.orchestration_queue.put_nowait((lambda: None, (), {})) + t.join(timeout=2) + + # Check that all work items completed + assert len(results) == 10 diff --git a/tests/durabletask/test_worker_concurrency_loop_async.py b/tests/durabletask/test_worker_concurrency_loop_async.py new file mode 100644 index 0000000..c7ba238 --- /dev/null +++ b/tests/durabletask/test_worker_concurrency_loop_async.py @@ -0,0 +1,80 @@ +import asyncio + +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker + + +class DummyStub: + def __init__(self): + self.completed = [] + + def CompleteOrchestratorTask(self, res): + self.completed.append(('orchestrator', res)) + + def CompleteActivityTask(self, res): + self.completed.append(('activity', res)) + + +class DummyRequest: + def __init__(self, kind, instance_id): + self.kind = kind + self.instanceId = instance_id + self.orchestrationInstance = type('O', (), {'instanceId': instance_id}) + self.name = 'dummy' + self.taskId = 1 + self.input = type('I', (), {'value': ''}) + self.pastEvents = [] + self.newEvents = [] + + def HasField(self, field): + return (field == 'orchestratorRequest' and self.kind == 'orchestrator') or \ + (field == 'activityRequest' and self.kind == 'activity') + + def WhichOneof(self, _): + return f'{self.kind}Request' + + +class DummyCompletionToken: + pass + + +def test_worker_concurrency_loop_async(): + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=2, + maximum_concurrent_orchestration_work_items=1, + maximum_thread_pool_workers=2, + ) + grpc_worker = TaskHubGrpcWorker(concurrency_options=options) + stub = DummyStub() + + async def dummy_orchestrator(req, stub, completionToken): + await asyncio.sleep(0.1) + stub.CompleteOrchestratorTask('ok') + + async def dummy_activity(req, stub, completionToken): + await asyncio.sleep(0.1) + stub.CompleteActivityTask('ok') + + # Patch 
the worker's _execute_orchestrator and _execute_activity + grpc_worker._execute_orchestrator = dummy_orchestrator + grpc_worker._execute_activity = dummy_activity + + orchestrator_requests = [DummyRequest('orchestrator', f'orch{i}') for i in range(3)] + activity_requests = [DummyRequest('activity', f'act{i}') for i in range(4)] + + async def run_test(): + # Clear stub state before each run + stub.completed.clear() + worker_task = asyncio.create_task(grpc_worker._async_worker_manager.run()) + for req in orchestrator_requests: + grpc_worker._async_worker_manager.submit_orchestration(dummy_orchestrator, req, stub, DummyCompletionToken()) + for req in activity_requests: + grpc_worker._async_worker_manager.submit_activity(dummy_activity, req, stub, DummyCompletionToken()) + await asyncio.sleep(1.0) + orchestrator_count = sum(1 for t, _ in stub.completed if t == 'orchestrator') + activity_count = sum(1 for t, _ in stub.completed if t == 'activity') + assert orchestrator_count == 3, f"Expected 3 orchestrator completions, got {orchestrator_count}" + assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" + grpc_worker._async_worker_manager._shutdown = True + await worker_task + asyncio.run(run_test()) + asyncio.run(run_test()) From 43a4453c51ba0745301253f53daff788896441c6 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Tue, 3 Jun 2025 11:46:10 -0700 Subject: [PATCH 14/20] Update GitHub workflows and automate release (#51) * Update GitHub workflows and automate release * Update linter config * more workflow changes * linter fixes * ignore new linter warning * even more workflow cleanup and improvement * declare asyncio as package dependency * Update requirements.txt Signed-off-by: Albert Callarisa --- ...s-sdk.yml => durabletask-azuremanaged.yml} | 17 ++- .github/workflows/durabletask.yml | 108 ++++++++++++++++++ .github/workflows/pr-validation.yml | 59 ---------- pyproject.toml | 3 +- requirements.txt | 4 +- .../test_dts_activity_sequence.py | 
6 +- .../test_dts_orchestration_e2e.py | 49 ++++---- .../test_durabletask_grpc_interceptor.py | 14 +-- tests/durabletask/test_orchestration_e2e.py | 7 +- 9 files changed, 163 insertions(+), 104 deletions(-) rename .github/workflows/{publish-dts-sdk.yml => durabletask-azuremanaged.yml} (87%) create mode 100644 .github/workflows/durabletask.yml delete mode 100644 .github/workflows/pr-validation.yml diff --git a/.github/workflows/publish-dts-sdk.yml b/.github/workflows/durabletask-azuremanaged.yml similarity index 87% rename from .github/workflows/publish-dts-sdk.yml rename to .github/workflows/durabletask-azuremanaged.yml index de773f2..73017e4 100644 --- a/.github/workflows/publish-dts-sdk.yml +++ b/.github/workflows/durabletask-azuremanaged.yml @@ -1,4 +1,4 @@ -name: Publish Durable Task Scheduler to PyPI +name: Durable Task Scheduler SDK (durabletask-azuremanaged) on: push: @@ -15,10 +15,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Set up Python 3.12 + - name: Set up Python 3.13 uses: actions/setup-python@v5 with: - python-version: 3.12 + python-version: 3.13 - name: Install dependencies working-directory: durabletask-azuremanaged run: | @@ -28,10 +28,17 @@ jobs: - name: Run flake8 Linter working-directory: durabletask-azuremanaged run: flake8 . + - name: Run flake8 Linter + working-directory: tests/durabletask-azuremanaged + run: flake8 . 
run-docker-tests: + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] env: - EMULATOR_VERSION: "v0.0.5" # Define the variable + EMULATOR_VERSION: "latest" needs: lint runs-on: ubuntu-latest steps: @@ -84,7 +91,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.12" # Adjust Python version as needed + python-version: "3.13" # Adjust Python version as needed - name: Install dependencies run: | diff --git a/.github/workflows/durabletask.yml b/.github/workflows/durabletask.yml new file mode 100644 index 0000000..4fb3fb0 --- /dev/null +++ b/.github/workflows/durabletask.yml @@ -0,0 +1,108 @@ +name: Durable Task SDK (durabletask) + +on: + push: + branches: + - "main" + tags: + - "v*" # Only run for tags starting with "v" + pull_request: + branches: + - "main" + +jobs: + lint-and-unit-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: 3.13 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel tox + pip install flake8 + - name: Run flake8 Linter + working-directory: durabletask + run: flake8 . + - name: "Run flake8 linter: tests" + working-directory: tests/durabletask + run: flake8 . + - name: "Run flake8 linter: examples" + working-directory: examples + run: flake8 . + + run-tests: + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + needs: lint-and-unit-tests + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install durabletask dependencies and the library itself + run: | + python -m pip install --upgrade pip + pip install flake8 pytest + pip install -r requirements.txt + pip install . 
+ - name: Pytest unit tests + working-directory: tests/durabletask + run: | + pytest -m "not e2e and not dts" --verbose + # Sidecar for running e2e tests requires Go SDK + - name: Install Go SDK + uses: actions/setup-go@v5 + with: + go-version: 'stable' + # Install and run the durabletask-go sidecar for running e2e tests + - name: Pytest e2e tests + working-directory: tests/durabletask + run: | + go install github.com/microsoft/durabletask-go@main + durabletask-go --port 4001 & + pytest -m "e2e and not dts" --verbose + + publish: + if: startsWith(github.ref, 'refs/tags/v') # Only run if a matching tag is pushed + needs: run-tests + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Extract version from tag + run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV # Extract version from the tag + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.13" # Adjust Python version as needed + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build twine + + - name: Build package from root directory + run: | + python -m build + + - name: Check package + run: | + twine check dist/* + + - name: Publish package to PyPI + env: + TWINE_USERNAME: __token__ + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} # Store your PyPI API token in GitHub Secrets + run: | + twine upload dist/* \ No newline at end of file diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml deleted file mode 100644 index 1d14d83..0000000 --- a/.github/workflows/pr-validation.yml +++ /dev/null @@ -1,59 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a variety of Python versions -# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python - -name: Build Validation - -on: - push: - branches: [ "main" ] - pull_request: - branches: [ "main" ] - merge_group: - -jobs: - 
build: - - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] - - steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Install durabletask dependencies and the library itself in editable mode - run: | - python -m pip install --upgrade pip - pip install flake8 pytest - pip install -r requirements.txt - pip install -e . - - name: Install durabletask-azuremanaged dependencies - working-directory: examples/dts - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt - - name: Lint with flake8 - run: | - flake8 . --count --show-source --statistics --exit-zero - - name: Pytest unit tests - working-directory: tests/durabletask - run: | - pytest -m "not e2e and not dts" --verbose - - # Sidecar for running e2e tests requires Go SDK - - name: Install Go SDK - uses: actions/setup-go@v5 - with: - go-version: 'stable' - - # Install and run the durabletask-go sidecar for running e2e tests - - name: Pytest e2e tests - working-directory: tests/durabletask - run: | - go install github.com/microsoft/durabletask-go@main - durabletask-go --port 4001 & - pytest -m "e2e and not dts" --verbose diff --git a/pyproject.toml b/pyproject.toml index 1491988..5438ca4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,7 +26,8 @@ license = {file = "LICENSE"} readme = "README.md" dependencies = [ "grpcio", - "protobuf" + "protobuf", + "asyncio" ] [project.urls] diff --git a/requirements.txt b/requirements.txt index 0da7d46..721453b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,5 +3,5 @@ grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newe protobuf pytest pytest-cov -azure-core -azure-identity \ No newline at end of file +azure-identity +asyncio \ No newline at end of file diff --git 
a/tests/durabletask-azuremanaged/test_dts_activity_sequence.py b/tests/durabletask-azuremanaged/test_dts_activity_sequence.py index c875e49..1a685d0 100644 --- a/tests/durabletask-azuremanaged/test_dts_activity_sequence.py +++ b/tests/durabletask-azuremanaged/test_dts_activity_sequence.py @@ -2,15 +2,15 @@ that calls an activity function in a sequence and prints the outputs.""" import os +import pytest + from durabletask import client, task from durabletask.azuremanaged.client import DurableTaskSchedulerClient from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker -import pytest - - pytestmark = pytest.mark.dts + def hello(ctx: task.ActivityContext, name: str) -> str: """Activity function that returns a greeting""" return f'Hello {name}!' diff --git a/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py b/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py index f10e605..9b7603f 100644 --- a/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py +++ b/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py @@ -2,9 +2,8 @@ # Licensed under the MIT License. 
import json -import threading -import time import os +import threading from datetime import timedelta import pytest @@ -21,6 +20,7 @@ taskhub_name = os.getenv("TASKHUB", "default") endpoint = os.getenv("ENDPOINT", "http://localhost:8080") + def test_empty_orchestration(): invoked = False @@ -31,12 +31,12 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(empty_orchestrator) w.start() c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = c.schedule_new_orchestration(empty_orchestrator) state = c.wait_for_orchestration_completion(id, timeout=30) @@ -66,13 +66,13 @@ def sequence(ctx: task.OrchestrationContext, start_val: int): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(sequence) w.add_activity(plus_one) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(sequence, input=1) state = task_hub_client.wait_for_orchestration_completion( id, timeout=30) @@ -113,14 +113,14 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, 
token_credential=None) as w: w.add_orchestrator(orchestrator) w.add_activity(throw) w.add_activity(increment_counter) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) @@ -158,14 +158,14 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_activity(increment) w.add_orchestrator(orchestrator_child) w.add_orchestrator(parent_orchestrator) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) @@ -184,13 +184,13 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(orchestrator) w.start() # Start the orchestration and immediately raise events to it. 
task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(orchestrator) task_hub_client.raise_orchestration_event(id, 'A', data='a') task_hub_client.raise_orchestration_event(id, 'B', data='b') @@ -285,12 +285,12 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(orchestrator) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(orchestrator) state = task_hub_client.wait_for_orchestration_start(id, timeout=30) assert state is not None @@ -302,23 +302,25 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.TERMINATED assert state.serialized_output == json.dumps("some reason for termination") + def test_terminate_recursive(): def root(ctx: task.OrchestrationContext, _): result = yield ctx.call_sub_orchestrator(child) return result + def child(ctx: task.OrchestrationContext, _): result = yield ctx.wait_for_external_event("my_event") return result # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(root) w.add_orchestrator(child) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, 
token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(root) state = task_hub_client.wait_for_orchestration_start(id, timeout=30) assert state is not None @@ -331,7 +333,7 @@ def child(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.TERMINATED # Verify that child orchestration is also terminated - c = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.runtime_status == client.OrchestrationStatus.TERMINATED @@ -417,14 +419,14 @@ def throw_activity_with_retry(ctx: task.ActivityContext, _): raise RuntimeError("Kah-BOOOOM!!!") with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(parent_orchestrator_with_retry) w.add_orchestrator(child_orchestrator_with_retry) w.add_activity(throw_activity_with_retry) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry) state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None @@ -460,13 +462,13 @@ def throw_activity(ctx: task.ActivityContext, _): raise RuntimeError("Kah-BOOOOM!!!") with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(mock_orchestrator) w.add_activity(throw_activity) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, 
token_credential=None) id = task_hub_client.schedule_new_orchestration(mock_orchestrator) state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None @@ -477,6 +479,7 @@ def throw_activity(ctx: task.ActivityContext, _): assert state.failure_details.stack_trace is not None assert throw_activity_counter == 4 + def test_custom_status(): def empty_orchestrator(ctx: task.OrchestrationContext, _): @@ -484,12 +487,12 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(empty_orchestrator) w.start() c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = c.schedule_new_orchestration(empty_orchestrator) state = c.wait_for_orchestration_completion(id, timeout=30) diff --git a/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py b/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py index 62978f9..0480d3d 100644 --- a/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py +++ b/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-import threading import unittest from concurrent import futures from importlib.metadata import version @@ -9,20 +8,17 @@ import grpc from durabletask.azuremanaged.client import DurableTaskSchedulerClient -from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( - DTSDefaultClientInterceptorImpl, -) from durabletask.internal import orchestrator_service_pb2 as pb from durabletask.internal import orchestrator_service_pb2_grpc as stubs class MockTaskHubSidecarServiceServicer(stubs.TaskHubSidecarServiceServicer): """Mock implementation of the TaskHubSidecarService for testing.""" - + def __init__(self): self.captured_metadata = {} self.requests_received = 0 - + def GetInstance(self, request, context): """Implementation of GetInstance that captures the metadata.""" # Store all metadata key-value pairs from the context @@ -38,7 +34,7 @@ def GetInstance(self, request, context): class TestDurableTaskGrpcInterceptor(unittest.TestCase): """Tests for the DTSDefaultClientInterceptorImpl class.""" - + @classmethod def setUpClass(cls): # Start a real gRPC server on a free port @@ -52,11 +48,11 @@ def setUpClass(cls): # Start the server in a background thread cls.server.start() - + @classmethod def tearDownClass(cls): cls.server.stop(grace=None) - + def test_user_agent_metadata_passed_in_request(self): """Test that the user agent metadata is correctly passed in gRPC requests.""" # Create a client that connects to our mock server diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index d3d7f0b..3ccf782 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -278,10 +278,12 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.TERMINATED assert state.serialized_output == json.dumps("some reason for termination") + def test_terminate_recursive(): def root(ctx: task.OrchestrationContext, _): result = 
yield ctx.call_sub_orchestrator(child) return result + def child(ctx: task.OrchestrationContext, _): result = yield ctx.wait_for_external_event("my_event") return result @@ -305,7 +307,7 @@ def child(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.TERMINATED # Verify that child orchestration is also terminated - c = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.runtime_status == client.OrchestrationStatus.TERMINATED @@ -321,7 +323,7 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): result = yield ctx.wait_for_external_event("my_event") if not ctx.is_replaying: # NOTE: Real orchestrations should never interact with nonlocal variables like this. - nonlocal all_results + nonlocal all_results # noqa: F824 all_results.append(result) if len(all_results) <= 4: @@ -445,6 +447,7 @@ def throw_activity(ctx: task.ActivityContext, _): assert state.failure_details.stack_trace is not None assert throw_activity_counter == 4 + def test_custom_status(): def empty_orchestrator(ctx: task.OrchestrationContext, _): From dfec5dac13d32282a921e9ca0c2b5caf3e782f6c Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Tue, 3 Jun 2025 21:29:05 +0100 Subject: [PATCH 15/20] Updates instructions for running e2e tests to match CI (#37) Signed-off-by: Elena Kolevska Co-authored-by: Bernd Verst Signed-off-by: Albert Callarisa --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 87af41d..b9d829c 100644 --- a/README.md +++ b/README.md @@ -177,10 +177,11 @@ make test-unit ### Running E2E tests -The E2E (end-to-end) tests require a sidecar process to be running. You can use the Dapr sidecar for this or run a Durable Task test sidecar using the following `docker` command: +The E2E (end-to-end) tests require a sidecar process to be running. 
You can use the Dapr sidecar for this or run a Durable Task test sidecar using the following command: ```sh -docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator +go install github.com/microsoft/durabletask-go@main +durabletask-go --port 4001 ``` To run the E2E tests, run the following command from the project root: From 89437eb2677227fba400ae39150f0f402e62e9e8 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Wed, 18 Jun 2025 12:13:07 +0200 Subject: [PATCH 16/20] Bring examples back Signed-off-by: Albert Callarisa --- examples/README.md | 27 ++++++++++ examples/activity_sequence.py | 35 +++++++++++++ examples/fanout_fanin.py | 62 ++++++++++++++++++++++ examples/human_interaction.py | 99 +++++++++++++++++++++++++++++++++++ 4 files changed, 223 insertions(+) create mode 100644 examples/README.md create mode 100644 examples/activity_sequence.py create mode 100644 examples/fanout_fanin.py create mode 100644 examples/human_interaction.py diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000..404b127 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,27 @@ +# Examples + +This directory contains examples of how to author durable orchestrations using the Durable Task Python SDK. + +## Prerequisites + +All the examples assume that you have a Durable Task-compatible sidecar running locally. There are two options for this: + +1. Install the latest version of the [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/), which contains and exposes an embedded version of the Durable Task engine. The setup process (which requires Docker) will configure the workflow engine to store state in a local Redis container. + +2. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. 
+ +## Running the examples + +With one of the sidecars running, you can simply execute any of the examples in this directory using `python3`: + +```sh +python3 ./activity_sequence.py +``` + +In some cases, the sample may require command-line parameters or user inputs. In these cases, the sample will print out instructions on how to proceed. + +## List of examples + +- [Activity sequence](./activity_sequence.py): Orchestration that schedules three activity calls in a sequence. +- [Fan-out/fan-in](./fanout_fanin.py): Orchestration that schedules a dynamic number of activity calls in parallel, waits for all of them to complete, and then performs an aggregation on the results. +- [Human interaction](./human_interaction.py): Orchestration that waits for a human to approve an order before continuing. diff --git a/examples/activity_sequence.py b/examples/activity_sequence.py new file mode 100644 index 0000000..066a733 --- /dev/null +++ b/examples/activity_sequence.py @@ -0,0 +1,35 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that calls an activity function in a sequence and prints the outputs.""" +from durabletask import client, task, worker + + +def hello(ctx: task.ActivityContext, name: str) -> str: + """Activity function that returns a greeting""" + return f'Hello {name}!' 
+ + +def sequence(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'hello' activity function in a sequence""" + # call "hello" activity function in a sequence + result1 = yield ctx.call_activity(hello, input='Tokyo') + result2 = yield ctx.call_activity(hello, input='Seattle') + result3 = yield ctx.call_activity(hello, input='London') + + # return an array of results + return [result1, result2, result3] + + +# configure and start the worker +with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(sequence) + w.add_activity(hello) + w.start() + + # create a client, start an orchestration, and wait for it to finish + c = client.TaskHubGrpcClient() + instance_id = c.schedule_new_orchestration(sequence) + state = c.wait_for_orchestration_completion(instance_id, timeout=10) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') diff --git a/examples/fanout_fanin.py b/examples/fanout_fanin.py new file mode 100644 index 0000000..c53744f --- /dev/null +++ b/examples/fanout_fanin.py @@ -0,0 +1,62 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that runs a dynamic number of activity functions in parallel, waits for them all +to complete, and prints an aggregate summary of the outputs.""" +import random +import time + +from durabletask import client, task, worker + + +def get_work_items(ctx: task.ActivityContext, _) -> list[str]: + """Activity function that returns a list of work items""" + # return a random number of work items + count = random.randint(2, 10) + print(f'generating {count} work items...') + return [f'work item {i}' for i in range(count)] + + +def process_work_item(ctx: task.ActivityContext, item: str) -> int: + """Activity function that returns a result for a given work item""" + print(f'processing work item: {item}') + + # simulate some work that
takes a variable amount of time + time.sleep(random.random() * 5) + + # return a result for the given work item, which is also a random number in this case + return random.randint(0, 10) + + +def orchestrator(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'get_work_items' and 'process_work_item' + activity functions in parallel, waits for them all to complete, and prints + an aggregate summary of the outputs""" + + work_items: list[str] = yield ctx.call_activity(get_work_items) + + # execute the work-items in parallel and wait for them all to return + tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items] + results: list[int] = yield task.when_all(tasks) + + # return an aggregate summary of the results + return { + 'work_items': work_items, + 'results': results, + 'total': sum(results), + } + + +# configure and start the worker +with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(orchestrator) + w.add_activity(process_work_item) + w.add_activity(get_work_items) + w.start() + + # create a client, start an orchestration, and wait for it to finish + c = client.TaskHubGrpcClient() + instance_id = c.schedule_new_orchestration(orchestrator) + state = c.wait_for_orchestration_completion(instance_id, timeout=30) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') diff --git a/examples/human_interaction.py b/examples/human_interaction.py new file mode 100644 index 0000000..2a01897 --- /dev/null +++ b/examples/human_interaction.py @@ -0,0 +1,99 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that waits for an "approval" event before proceeding to the next step.
If +the approval isn't received within a specified timeout, the order that is +represented by the orchestration is automatically cancelled.""" + +import threading +import time +from collections import namedtuple +from dataclasses import dataclass +from datetime import timedelta + +from durabletask import client, task, worker + + +@dataclass +class Order: + """Represents a purchase order""" + Cost: float + Product: str + Quantity: int + + def __str__(self): + return f'{self.Product} ({self.Quantity})' + + +def send_approval_request(_: task.ActivityContext, order: Order) -> None: + """Activity function that sends an approval request to the manager""" + time.sleep(5) + print(f'*** Sending approval request for order: {order}') + + +def place_order(_: task.ActivityContext, order: Order) -> None: + """Activity function that places an order""" + print(f'*** Placing order: {order}') + + +def purchase_order_workflow(ctx: task.OrchestrationContext, order: Order): + """Orchestrator function that represents a purchase order workflow""" + # Orders under $1000 are auto-approved + if order.Cost < 1000: + return "Auto-approved" + + # Orders of $1000 or more require manager approval + yield ctx.call_activity(send_approval_request, input=order) + + # Approvals must be received within 24 hours or they will be canceled. 
+ approval_event = ctx.wait_for_external_event("approval_received") + timeout_event = ctx.create_timer(timedelta(hours=24)) + winner = yield task.when_any([approval_event, timeout_event]) + if winner == timeout_event: + return "Cancelled" + + # The order was approved + yield ctx.call_activity(place_order, input=order) + approval_details = approval_event.get_result() + return f"Approved by '{approval_details.approver}'" + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Order purchasing workflow demo.") + parser.add_argument("--cost", type=int, default=2000, help="Cost of the order") + parser.add_argument("--approver", type=str, default="Me", help="Approver name") + parser.add_argument("--timeout", type=int, default=60, help="Timeout in seconds") + args = parser.parse_args() + + # configure and start the worker + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(purchase_order_workflow) + w.add_activity(send_approval_request) + w.add_activity(place_order) + w.start() + + c = client.TaskHubGrpcClient() + + # Start a purchase order workflow using the user input + order = Order(args.cost, "MyProduct", 1) + instance_id = c.schedule_new_orchestration(purchase_order_workflow, input=order) + + def prompt_for_approval(): + input("Press [ENTER] to approve the order...\n") + approval_event = namedtuple("Approval", ["approver"])(args.approver) + c.raise_orchestration_event(instance_id, "approval_received", data=approval_event) + + # Prompt the user for approval on a background thread + threading.Thread(target=prompt_for_approval, daemon=True).start() + + # Wait for the orchestration to complete + try: + state = c.wait_for_orchestration_completion(instance_id, timeout=args.timeout + 2) + if not state: + print("Workflow not found!") # not expected + elif state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! 
Result: {state.serialized_output}') + else: + state.raise_if_failed() # raises an exception + except TimeoutError: + print("*** Orchestration timed out!") From 3439afc91b29a65dc82b881cf484bb386507e758 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Wed, 18 Jun 2025 12:14:25 +0200 Subject: [PATCH 17/20] Remove misleading entry in changelog The changelog entry removed mentions `azuremanaged`, and that's something we are not bringing from upstream. Signed-off-by: Albert Callarisa --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6921faa..376221e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) -- Added new `durabletask-azuremanaged` package for use with the [Durable Task Scheduler](https://learn.microsoft.com/azure/azure-functions/durable/durable-task-scheduler/durable-task-scheduler) - by [@RyanLettieri](https://github.com/RyanLettieri) ### Changes From 76033a2e2dbe680dbc7d6fcca3be447aee5f1415 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Wed, 18 Jun 2025 12:56:40 +0200 Subject: [PATCH 18/20] Fixed examples readme with specific dapr instructions Signed-off-by: Albert Callarisa --- examples/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/README.md b/examples/README.md index 404b127..a6cd847 100644 --- a/examples/README.md +++ b/examples/README.md @@ -8,7 +8,11 @@ All the examples assume that you have a Durable Task-compatible sidecar running 1. 
Install the latest version of the [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/), which contains and exposes an embedded version of the Durable Task engine. The setup process (which requires Docker) will configure the workflow engine to store state in a local Redis container.

-2. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database.
+2. Run the [Durable Task Sidecar](https://github.com/dapr/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database.
+   ```sh
+   go install github.com/dapr/durabletask-go@main
+   durabletask-go --port 4001
+   ```

## Running the examples

From f9e2bf5d72963a8083b0f9c2dfb03c09e6bcec3c Mon Sep 17 00:00:00 2001
From: Albert Callarisa
Date: Mon, 30 Jun 2025 07:52:28 +0200
Subject: [PATCH 19/20] Use pinned grpcio-tools version

For context: https://github.com/dapr/durabletask-python/pull/12#discussion_r2154292531

Signed-off-by: Albert Callarisa
---
 dev-requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-requirements.txt b/dev-requirements.txt
index b3ff6f7..119f072 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -1 +1 @@
-grpcio-tools
+grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 5.26.X is used which has breaking changes for Python

From 6f12d1dad0c47ebaa8cf36e4ff85dafa7d6d9bb2 Mon Sep 17 00:00:00 2001
From: Albert Callarisa
Date: Mon, 7 Jul 2025 14:20:18 +0200
Subject: [PATCH 20/20] fix tests

Signed-off-by: Albert Callarisa
---
 pyproject.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pyproject.toml b/pyproject.toml
index 04ea774..8c4d1e4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,6 +44,7 @@ local_scheme = "no-local-version"
 [tool.pytest.ini_options]
 minversion = "6.0"
 testpaths = ["tests"]
+pythonpath = ["."]
 markers = [
     "e2e: mark
a test as an end-to-end test that requires a running sidecar" ]