From 331b96761157ee5d8610d7f7cc6c2966c8f4a461 Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Mon, 25 Nov 2024 14:55:12 +0000 Subject: [PATCH 01/81] Sets up CI for publishing package to pypy (#1) * Sets up CI for publishing to pypy Signed-off-by: Elena Kolevska * mention fork in the readme page Signed-off-by: Elena Kolevska --------- Signed-off-by: Elena Kolevska --- .github/workflows/pr-validation.yml | 23 +++++++++++++++++++++++ README.md | 2 +- pyproject.toml | 6 +++--- 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 4c09e6b..59d47cb 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -6,6 +6,7 @@ name: Build Validation on: push: branches: [ "main" ] + tags: ["v*"] pull_request: branches: [ "main" ] @@ -35,3 +36,25 @@ jobs: - name: Pytest unit tests run: | pytest -m "not e2e" --verbose + publish: + needs: build + if: startswith(github.ref, 'refs/tags/v') + runs-on: ubuntu-latest + env: + TWINE_USERNAME: "__token__" + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: 3.11 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine + - name: Build and publish Dapr Python SDK + env: + TWINE_PASSWORD: ${{ secrets.PYPI_UPLOAD_PASS }} + run: | + python -m build + twine upload dist/* \ No newline at end of file diff --git a/README.md b/README.md index 22b3c44..47e7a00 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Durable Task Client SDK for Python +# Durable Task Client SDK for Python (Dapr fork) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Build 
Validation](https://github.com/microsoft/durabletask-python/actions/workflows/pr-validation.yml/badge.svg)](https://github.com/microsoft/durabletask-python/actions/workflows/pr-validation.yml) diff --git a/pyproject.toml b/pyproject.toml index d57957d..b6cb061 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ requires = ["setuptools", "wheel"] build-backend = "setuptools.build_meta" [project] -name = "durabletask" +name = "durabletask-dapr" version = "0.1.1-alpha.1" description = "A Durable Task Client SDK for Python" keywords = [ @@ -29,8 +29,8 @@ dependencies = [ ] [project.urls] -repository = "https://github.com/microsoft/durabletask-python" -changelog = "https://github.com/microsoft/durabletask-python/blob/main/CHANGELOG.md" +repository = "https://github.com/dapr/durabletask-python" +changelog = "https://github.com/dapr/durabletask-python/blob/main/CHANGELOG.md" [tool.setuptools.packages.find] include = ["durabletask", "durabletask.*"] From 076bad5b3900ee84af0db52228768df7c27db202 Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Tue, 26 Nov 2024 01:47:35 +0000 Subject: [PATCH 02/81] Adds build package (#2) Signed-off-by: Elena Kolevska --- .github/workflows/pr-validation.yml | 42 ++++++++++++++--------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 59d47cb..d6a0c37 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -37,24 +37,24 @@ jobs: run: | pytest -m "not e2e" --verbose publish: - needs: build - if: startswith(github.ref, 'refs/tags/v') - runs-on: ubuntu-latest - env: - TWINE_USERNAME: "__token__" - steps: - - uses: actions/checkout@v4 - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: 3.11 - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install setuptools wheel twine - - name: Build and publish Dapr Python SDK - env: - 
TWINE_PASSWORD: ${{ secrets.PYPI_UPLOAD_PASS }} - run: | - python -m build - twine upload dist/* \ No newline at end of file + needs: build + if: startswith(github.ref, 'refs/tags/v') + runs-on: ubuntu-latest + env: + TWINE_USERNAME: "__token__" + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: 3.11 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine build + - name: Build and publish Dapr Python SDK + env: + TWINE_PASSWORD: ${{ secrets.PYPI_UPLOAD_PASS }} + run: | + python -m build + twine upload dist/* From 4fc38e2329e12c4aadad934a8eebbe259e94551e Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Wed, 27 Nov 2024 14:15:15 +0000 Subject: [PATCH 03/81] Get version from tag instead of hardcoding it (#3) Signed-off-by: Elena Kolevska --- .github/workflows/pr-validation.yml | 4 +++- pyproject.toml | 8 ++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index d6a0c37..1fdee99 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -20,7 +20,7 @@ jobs: python-version: ["3.8", "3.9", "3.10", "3.11"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: @@ -44,6 +44,8 @@ jobs: TWINE_USERNAME: "__token__" steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: diff --git a/pyproject.toml b/pyproject.toml index b6cb061..ed94136 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,12 +4,12 @@ # For more information on pyproject.toml, see https://peps.python.org/pep-0621/ [build-system] -requires = ["setuptools", "wheel"] +requires = ["setuptools", "wheel", "setuptools_scm"] build-backend = "setuptools.build_meta" [project] name = 
"durabletask-dapr" -version = "0.1.1-alpha.1" +dynamic = ["version"] description = "A Durable Task Client SDK for Python" keywords = [ "durable", @@ -35,6 +35,10 @@ changelog = "https://github.com/dapr/durabletask-python/blob/main/CHANGELOG.md" [tool.setuptools.packages.find] include = ["durabletask", "durabletask.*"] +[tool.setuptools_scm] +version_scheme = "guess-next-dev" +local_scheme = "no-local-version" + [tool.pytest.ini_options] minversion = "6.0" testpaths = ["tests"] From 60c4633f372bef8ac8db982ba96fca08ddd53ca1 Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Mon, 13 Jan 2025 16:24:08 +0000 Subject: [PATCH 04/81] Syncs with upstream (bumps min python version, regenerates protos with an older grpcio-tools version) (#6) * Update version to 0.2b1, require Python 3.9+, and enhance GitHub Actions workflow (#1) (#35) - Bump version in `pyproject.toml` to 0.2b1 and update Python requirement to >=3.9. - Add `protobuf` dependency in `requirements.txt`. - Update GitHub Actions workflow to support Python versions 3.9 to 3.13 and upgrade action versions. - Refactor type hints in various files to use `Optional` and `list` instead of `Union` and `List`. - Improve handling of custom status in orchestration context and related functions. - Fix purge implementation to pass required parameters. 
Signed-off-by: Elena Kolevska * Downgrade required `grpcio` and `protobuf` versions (#36) Signed-off-by: Elena Kolevska --------- Signed-off-by: Elena Kolevska Co-authored-by: Chris Gillum Co-authored-by: Bernd Verst --- .github/workflows/pr-validation.yml | 17 +- .vscode/settings.json | 5 +- CHANGELOG.md | 4 + Makefile | 5 +- README.md | 1 + dev-requirements.txt | 1 + durabletask/client.py | 40 +- durabletask/internal/__init__.py | 0 durabletask/internal/grpc_interceptor.py | 3 +- durabletask/internal/helpers.py | 32 +- .../internal/orchestrator_service_pb2.py | 386 +++++----- .../internal/orchestrator_service_pb2_grpc.py | 673 ++++++------------ durabletask/internal/shared.py | 15 +- durabletask/task.py | 30 +- durabletask/worker.py | 50 +- examples/fanout_fanin.py | 7 +- pyproject.toml | 2 +- requirements.txt | 6 +- tests/test_activity_executor.py | 4 +- tests/test_orchestration_e2e.py | 2 +- tests/test_orchestration_executor.py | 3 +- 21 files changed, 529 insertions(+), 757 deletions(-) create mode 100644 dev-requirements.txt delete mode 100644 durabletask/internal/__init__.py diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 1fdee99..1bf04a8 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -17,12 +17,12 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies @@ -36,6 +36,17 @@ jobs: - name: Pytest unit tests run: | pytest -m "not e2e" --verbose + # Sidecar for running e2e tests requires Go SDK + - name: Install Go SDK + uses: actions/setup-go@v5 + with: + go-version: 'stable' + # Install and run the durabletask-go sidecar for running e2e tests + - name: 
Pytest e2e tests + run: | + go install github.com/microsoft/durabletask-go@main + durabletask-go --port 4001 & + pytest -m "e2e" --verbose publish: needs: build if: startswith(github.ref, 'refs/tags/v') @@ -59,4 +70,4 @@ jobs: TWINE_PASSWORD: ${{ secrets.PYPI_UPLOAD_PASS }} run: | python -m build - twine upload dist/* + twine upload dist/* \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index d737b0b..1c929ac 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,7 +3,7 @@ "editor.defaultFormatter": "ms-python.autopep8", "editor.formatOnSave": true, "editor.codeActionsOnSave": { - "source.organizeImports": true, + "source.organizeImports": "explicit" }, "editor.rulers": [ 119 @@ -29,5 +29,6 @@ "coverage.xml", "jacoco.xml", "coverage.cobertura.xml" - ] + ], + "makefile.configureOnOpen": false } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index fc4b3d2..a09078d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) +### Changes + +- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries. + ### Updates - Updated `durabletask-protobuf` submodule reference to latest diff --git a/Makefile b/Makefile index 16b883e..68a9b89 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,8 @@ install: python3 -m pip install . 
gen-proto: -# NOTE: There is currently a hand-edit that we make to the generated orchestrator_service_pb2.py file after it's generated to help resolve import problems. - python3 -m grpc_tools.protoc --proto_path=./submodules/durabletask-protobuf/protos --python_out=./durabletask/internal --pyi_out=./durabletask/internal --grpc_python_out=./durabletask/internal orchestrator_service.proto + cp ./submodules/durabletask-protobuf/protos/orchestrator_service.proto durabletask/internal/orchestrator_service.proto + python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. ./durabletask/internal/orchestrator_service.proto + rm durabletask/internal/*.proto .PHONY: init test-unit test-e2e gen-proto install diff --git a/README.md b/README.md index 47e7a00..443ea99 100644 --- a/README.md +++ b/README.md @@ -170,6 +170,7 @@ git submodule update --init Once the submodule is available, the corresponding source code can be regenerated using the following command from the project root: ```sh +pip3 install -r dev-requirements.txt make gen-proto ``` diff --git a/dev-requirements.txt b/dev-requirements.txt new file mode 100644 index 0000000..119f072 --- /dev/null +++ b/dev-requirements.txt @@ -0,0 +1 @@ +grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 1.26.X is used which has breaking changes for Python diff --git a/durabletask/client.py b/durabletask/client.py index 82f920a..31953ae 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from datetime import datetime from enum import Enum -from typing import Any, List, Tuple, TypeVar, Union +from typing import Any, Optional, TypeVar, Union import grpc from google.protobuf import wrappers_pb2 @@ -42,10 +42,10 @@ class OrchestrationState: runtime_status: OrchestrationStatus created_at: datetime last_updated_at: datetime - serialized_input: Union[str, None] - serialized_output: Union[str, None] - serialized_custom_status: 
Union[str, None] - failure_details: Union[task.FailureDetails, None] + serialized_input: Optional[str] + serialized_output: Optional[str] + serialized_custom_status: Optional[str] + failure_details: Optional[task.FailureDetails] def raise_if_failed(self): if self.failure_details is not None: @@ -64,7 +64,7 @@ def failure_details(self): return self._failure_details -def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Union[OrchestrationState, None]: +def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Optional[OrchestrationState]: if not res.exists: return None @@ -92,20 +92,20 @@ def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Un class TaskHubGrpcClient: def __init__(self, *, - host_address: Union[str, None] = None, - metadata: Union[List[Tuple[str, str]], None] = None, - log_handler = None, - log_formatter: Union[logging.Formatter, None] = None, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, secure_channel: bool = False): channel = shared.get_grpc_channel(host_address, metadata, secure_channel=secure_channel) self._stub = stubs.TaskHubSidecarServiceStub(channel) self._logger = shared.get_logger("client", log_handler, log_formatter) def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, - input: Union[TInput, None] = None, - instance_id: Union[str, None] = None, - start_at: Union[datetime, None] = None, - reuse_id_policy: Union[pb.OrchestrationIdReusePolicy, None] = None) -> str: + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + start_at: Optional[datetime] = None, + reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None) -> str: name = orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) @@ -122,14 +122,14 @@ def 
schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInpu res: pb.CreateInstanceResponse = self._stub.StartInstance(req) return res.instanceId - def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> Union[OrchestrationState, None]: + def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) res: pb.GetInstanceResponse = self._stub.GetInstance(req) return new_orchestration_state(req.instanceId, res) def wait_for_orchestration_start(self, instance_id: str, *, fetch_payloads: bool = False, - timeout: int = 60) -> Union[OrchestrationState, None]: + timeout: int = 60) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: self._logger.info(f"Waiting up to {timeout}s for instance '{instance_id}' to start.") @@ -144,7 +144,7 @@ def wait_for_orchestration_start(self, instance_id: str, *, def wait_for_orchestration_completion(self, instance_id: str, *, fetch_payloads: bool = True, - timeout: int = 60) -> Union[OrchestrationState, None]: + timeout: int = 60) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: self._logger.info(f"Waiting {timeout}s for instance '{instance_id}' to complete.") @@ -170,7 +170,7 @@ def wait_for_orchestration_completion(self, instance_id: str, *, raise def raise_orchestration_event(self, instance_id: str, event_name: str, *, - data: Union[Any, None] = None): + data: Optional[Any] = None): req = pb.RaiseEventRequest( instanceId=instance_id, name=event_name, @@ -180,7 +180,7 @@ def raise_orchestration_event(self, instance_id: str, event_name: str, *, self._stub.RaiseEvent(req) def terminate_orchestration(self, instance_id: str, *, - output: Union[Any, None] = None, + output: Optional[Any] = None, recursive: bool 
= True): req = pb.TerminateRequest( instanceId=instance_id, @@ -203,4 +203,4 @@ def resume_orchestration(self, instance_id: str): def purge_orchestration(self, instance_id: str, recursive: bool = True): req = pb.PurgeInstancesRequest(instanceId=instance_id, recursive=recursive) self._logger.info(f"Purging instance '{instance_id}'.") - self._stub.PurgeInstances() + self._stub.PurgeInstances(req) diff --git a/durabletask/internal/__init__.py b/durabletask/internal/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/durabletask/internal/grpc_interceptor.py b/durabletask/internal/grpc_interceptor.py index 5b12ace..738fca9 100644 --- a/durabletask/internal/grpc_interceptor.py +++ b/durabletask/internal/grpc_interceptor.py @@ -2,7 +2,6 @@ # Licensed under the MIT License. from collections import namedtuple -from typing import List, Tuple import grpc @@ -26,7 +25,7 @@ class DefaultClientInterceptorImpl ( StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an interceptor to add additional headers to all calls as needed.""" - def __init__(self, metadata: List[Tuple[str, str]]): + def __init__(self, metadata: list[tuple[str, str]]): super().__init__() self._metadata = metadata diff --git a/durabletask/internal/helpers.py b/durabletask/internal/helpers.py index c7354e5..6b36586 100644 --- a/durabletask/internal/helpers.py +++ b/durabletask/internal/helpers.py @@ -3,7 +3,7 @@ import traceback from datetime import datetime -from typing import List, Union +from typing import Optional from google.protobuf import timestamp_pb2, wrappers_pb2 @@ -12,14 +12,14 @@ # TODO: The new_xxx_event methods are only used by test code and should be moved elsewhere -def new_orchestrator_started_event(timestamp: Union[datetime, None] = None) -> pb.HistoryEvent: +def new_orchestrator_started_event(timestamp: Optional[datetime] = None) -> pb.HistoryEvent: ts = timestamp_pb2.Timestamp() if timestamp is not None: ts.FromDatetime(timestamp) return 
pb.HistoryEvent(eventId=-1, timestamp=ts, orchestratorStarted=pb.OrchestratorStartedEvent()) -def new_execution_started_event(name: str, instance_id: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_execution_started_event(name: str, instance_id: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -49,7 +49,7 @@ def new_timer_fired_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent: ) -def new_task_scheduled_event(event_id: int, name: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_task_scheduled_event(event_id: int, name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), @@ -57,7 +57,7 @@ def new_task_scheduled_event(event_id: int, name: str, encoded_input: Union[str, ) -def new_task_completed_event(event_id: int, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_task_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -77,7 +77,7 @@ def new_sub_orchestration_created_event( event_id: int, name: str, instance_id: str, - encoded_input: Union[str, None] = None) -> pb.HistoryEvent: + encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), @@ -88,7 +88,7 @@ def new_sub_orchestration_created_event( ) -def new_sub_orchestration_completed_event(event_id: int, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_sub_orchestration_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -116,7 +116,7 @@ def new_failure_details(ex: Exception) -> pb.TaskFailureDetails: ) -def new_event_raised_event(name: str, 
encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_event_raised_event(name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -140,7 +140,7 @@ def new_resume_event() -> pb.HistoryEvent: ) -def new_terminated_event(*, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_terminated_event(*, encoded_output: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -150,7 +150,7 @@ def new_terminated_event(*, encoded_output: Union[str, None] = None) -> pb.Histo ) -def get_string_value(val: Union[str, None]) -> Union[wrappers_pb2.StringValue, None]: +def get_string_value(val: Optional[str]) -> Optional[wrappers_pb2.StringValue]: if val is None: return None else: @@ -160,9 +160,9 @@ def get_string_value(val: Union[str, None]) -> Union[wrappers_pb2.StringValue, N def new_complete_orchestration_action( id: int, status: pb.OrchestrationStatus, - result: Union[str, None] = None, - failure_details: Union[pb.TaskFailureDetails, None] = None, - carryover_events: Union[List[pb.HistoryEvent], None] = None) -> pb.OrchestratorAction: + result: Optional[str] = None, + failure_details: Optional[pb.TaskFailureDetails] = None, + carryover_events: Optional[list[pb.HistoryEvent]] = None) -> pb.OrchestratorAction: completeOrchestrationAction = pb.CompleteOrchestrationAction( orchestrationStatus=status, result=get_string_value(result), @@ -178,7 +178,7 @@ def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction return pb.OrchestratorAction(id=id, createTimer=pb.CreateTimerAction(fireAt=timestamp)) -def new_schedule_task_action(id: int, name: str, encoded_input: Union[str, None]) -> pb.OrchestratorAction: +def new_schedule_task_action(id: int, name: str, encoded_input: Optional[str]) -> pb.OrchestratorAction: return pb.OrchestratorAction(id=id, scheduleTask=pb.ScheduleTaskAction( 
name=name, input=get_string_value(encoded_input) @@ -194,8 +194,8 @@ def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp: def new_create_sub_orchestration_action( id: int, name: str, - instance_id: Union[str, None], - encoded_input: Union[str, None]) -> pb.OrchestratorAction: + instance_id: Optional[str], + encoded_input: Optional[str]) -> pb.OrchestratorAction: return pb.OrchestratorAction(id=id, createSubOrchestration=pb.CreateSubOrchestrationAction( name=name, instanceId=instance_id, diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py index 6ee3bbb..9c92eac 100644 --- a/durabletask/internal/orchestrator_service_pb2.py +++ b/durabletask/internal/orchestrator_service_pb2.py @@ -1,22 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: orchestrator_service.proto -# Protobuf Python Version: 5.27.2 +# source: durabletask/internal/orchestrator_service.proto +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 5, - 27, - 2, - '', - 'orchestrator_service.proto' -) # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -28,196 +18,196 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x1aorchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t 
\x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 
\x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b 
\x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 
\x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 
\x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 
\x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 
\n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43lean
EntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 
\x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 
\x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 
\x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 
\x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 
\x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 
\x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 
\x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 
\n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.Delet
eTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'orchestrator_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'durabletask.internal.orchestrator_service_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + _globals['DESCRIPTOR']._options = None _globals['DESCRIPTOR']._serialized_options = b'\n1com.microsoft.durabletask.implementation.protobufZ\020/internal/protos\252\002\036Microsoft.DurableTask.Protobuf' - _globals['_TRACECONTEXT'].fields_by_name['spanID']._loaded_options = None + _globals['_TRACECONTEXT'].fields_by_name['spanID']._options = None _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._loaded_options = None + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._options = None _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=12076 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=12385 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12387 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12452 - _globals['_ORCHESTRATIONINSTANCE']._serialized_start=156 - _globals['_ORCHESTRATIONINSTANCE']._serialized_end=250 - _globals['_ACTIVITYREQUEST']._serialized_start=253 - 
_globals['_ACTIVITYREQUEST']._serialized_end=490 - _globals['_ACTIVITYRESPONSE']._serialized_start=493 - _globals['_ACTIVITYRESPONSE']._serialized_end=638 - _globals['_TASKFAILUREDETAILS']._serialized_start=641 - _globals['_TASKFAILUREDETAILS']._serialized_end=819 - _globals['_PARENTINSTANCEINFO']._serialized_start=822 - _globals['_PARENTINSTANCEINFO']._serialized_end=1013 - _globals['_TRACECONTEXT']._serialized_start=1015 - _globals['_TRACECONTEXT']._serialized_end=1120 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1123 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1515 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1518 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1685 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1687 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1775 - _globals['_TASKSCHEDULEDEVENT']._serialized_start=1778 - _globals['_TASKSCHEDULEDEVENT']._serialized_end=1947 - _globals['_TASKCOMPLETEDEVENT']._serialized_start=1949 - _globals['_TASKCOMPLETEDEVENT']._serialized_end=2040 - _globals['_TASKFAILEDEVENT']._serialized_start=2042 - _globals['_TASKFAILEDEVENT']._serialized_end=2129 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2132 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2339 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2341 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2452 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2454 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2561 - _globals['_TIMERCREATEDEVENT']._serialized_start=2563 - _globals['_TIMERCREATEDEVENT']._serialized_end=2626 - _globals['_TIMERFIREDEVENT']._serialized_start=2628 - _globals['_TIMERFIREDEVENT']._serialized_end=2706 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2708 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2734 - 
_globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2736 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2764 - _globals['_EVENTSENTEVENT']._serialized_start=2766 - _globals['_EVENTSENTEVENT']._serialized_end=2861 - _globals['_EVENTRAISEDEVENT']._serialized_start=2863 - _globals['_EVENTRAISEDEVENT']._serialized_end=2940 - _globals['_GENERICEVENT']._serialized_start=2942 - _globals['_GENERICEVENT']._serialized_end=3000 - _globals['_HISTORYSTATEEVENT']._serialized_start=3002 - _globals['_HISTORYSTATEEVENT']._serialized_end=3070 - _globals['_CONTINUEASNEWEVENT']._serialized_start=3072 - _globals['_CONTINUEASNEWEVENT']._serialized_end=3137 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3139 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3209 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3211 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3279 - _globals['_HISTORYEVENT']._serialized_start=3282 - _globals['_HISTORYEVENT']._serialized_end=4440 - _globals['_SCHEDULETASKACTION']._serialized_start=4442 - _globals['_SCHEDULETASKACTION']._serialized_end=4568 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4571 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4727 - _globals['_CREATETIMERACTION']._serialized_start=4729 - _globals['_CREATETIMERACTION']._serialized_end=4792 - _globals['_SENDEVENTACTION']._serialized_start=4794 - _globals['_SENDEVENTACTION']._serialized_end=4911 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4914 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5222 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5224 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5337 - _globals['_ORCHESTRATORACTION']._serialized_start=5340 - _globals['_ORCHESTRATORACTION']._serialized_end=5718 - _globals['_ORCHESTRATORREQUEST']._serialized_start=5721 - _globals['_ORCHESTRATORREQUEST']._serialized_end=5939 - 
_globals['_ORCHESTRATORRESPONSE']._serialized_start=5942 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=6074 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=6077 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=6496 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6453 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6496 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6498 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6617 - _globals['_CREATEINSTANCERESPONSE']._serialized_start=6619 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=6663 - _globals['_GETINSTANCEREQUEST']._serialized_start=6665 - _globals['_GETINSTANCEREQUEST']._serialized_end=6734 - _globals['_GETINSTANCERESPONSE']._serialized_start=6736 - _globals['_GETINSTANCERESPONSE']._serialized_end=6822 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=6824 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=6913 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=6915 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=6939 - _globals['_ORCHESTRATIONSTATE']._serialized_start=6942 - _globals['_ORCHESTRATIONSTATE']._serialized_end=7618 - _globals['_RAISEEVENTREQUEST']._serialized_start=7620 - _globals['_RAISEEVENTREQUEST']._serialized_end=7718 - _globals['_RAISEEVENTRESPONSE']._serialized_start=7720 - _globals['_RAISEEVENTRESPONSE']._serialized_end=7740 - _globals['_TERMINATEREQUEST']._serialized_start=7742 - _globals['_TERMINATEREQUEST']._serialized_end=7845 - _globals['_TERMINATERESPONSE']._serialized_start=7847 - _globals['_TERMINATERESPONSE']._serialized_end=7866 - _globals['_SUSPENDREQUEST']._serialized_start=7868 - _globals['_SUSPENDREQUEST']._serialized_end=7950 - _globals['_SUSPENDRESPONSE']._serialized_start=7952 - _globals['_SUSPENDRESPONSE']._serialized_end=7969 - _globals['_RESUMEREQUEST']._serialized_start=7971 - _globals['_RESUMEREQUEST']._serialized_end=8052 - _globals['_RESUMERESPONSE']._serialized_start=8054 
- _globals['_RESUMERESPONSE']._serialized_end=8070 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=8072 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=8126 - _globals['_INSTANCEQUERY']._serialized_start=8129 - _globals['_INSTANCEQUERY']._serialized_end=8515 - _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8518 - _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8648 - _globals['_PURGEINSTANCESREQUEST']._serialized_start=8651 - _globals['_PURGEINSTANCESREQUEST']._serialized_end=8779 - _globals['_PURGEINSTANCEFILTER']._serialized_start=8782 - _globals['_PURGEINSTANCEFILTER']._serialized_end=8952 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8954 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9008 - _globals['_CREATETASKHUBREQUEST']._serialized_start=9010 - _globals['_CREATETASKHUBREQUEST']._serialized_end=9058 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=9060 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=9083 - _globals['_DELETETASKHUBREQUEST']._serialized_start=9085 - _globals['_DELETETASKHUBREQUEST']._serialized_end=9107 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=9109 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=9132 - _globals['_SIGNALENTITYREQUEST']._serialized_start=9135 - _globals['_SIGNALENTITYREQUEST']._serialized_end=9305 - _globals['_SIGNALENTITYRESPONSE']._serialized_start=9307 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=9329 - _globals['_GETENTITYREQUEST']._serialized_start=9331 - _globals['_GETENTITYREQUEST']._serialized_end=9391 - _globals['_GETENTITYRESPONSE']._serialized_start=9393 - _globals['_GETENTITYRESPONSE']._serialized_end=9461 - _globals['_ENTITYQUERY']._serialized_start=9464 - _globals['_ENTITYQUERY']._serialized_end=9795 - _globals['_QUERYENTITIESREQUEST']._serialized_start=9797 - _globals['_QUERYENTITIESREQUEST']._serialized_end=9848 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=9850 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=9965 - 
_globals['_ENTITYMETADATA']._serialized_start=9968 - _globals['_ENTITYMETADATA']._serialized_end=10187 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10190 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10333 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10336 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10482 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10484 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10577 - _globals['_ENTITYBATCHREQUEST']._serialized_start=10580 - _globals['_ENTITYBATCHREQUEST']._serialized_end=10710 - _globals['_ENTITYBATCHRESULT']._serialized_start=10713 - _globals['_ENTITYBATCHRESULT']._serialized_end=10898 - _globals['_OPERATIONREQUEST']._serialized_start=10900 - _globals['_OPERATIONREQUEST']._serialized_end=11001 - _globals['_OPERATIONRESULT']._serialized_start=11003 - _globals['_OPERATIONRESULT']._serialized_end=11122 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11124 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11194 - _globals['_OPERATIONRESULTFAILURE']._serialized_start=11196 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=11265 - _globals['_OPERATIONACTION']._serialized_start=11268 - _globals['_OPERATIONACTION']._serialized_end=11424 - _globals['_SENDSIGNALACTION']._serialized_start=11427 - _globals['_SENDSIGNALACTION']._serialized_end=11575 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11578 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11784 - _globals['_GETWORKITEMSREQUEST']._serialized_start=11786 - _globals['_GETWORKITEMSREQUEST']._serialized_end=11807 - _globals['_WORKITEM']._serialized_start=11810 - _globals['_WORKITEM']._serialized_end=12035 - _globals['_COMPLETETASKRESPONSE']._serialized_start=12037 - _globals['_COMPLETETASKRESPONSE']._serialized_end=12059 - _globals['_HEALTHPING']._serialized_start=12061 - _globals['_HEALTHPING']._serialized_end=12073 - 
_globals['_TASKHUBSIDECARSERVICE']._serialized_start=12455 - _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13859 + _globals['_ORCHESTRATIONSTATUS']._serialized_start=12097 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=12406 + _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12408 + _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12473 + _globals['_ORCHESTRATIONINSTANCE']._serialized_start=177 + _globals['_ORCHESTRATIONINSTANCE']._serialized_end=271 + _globals['_ACTIVITYREQUEST']._serialized_start=274 + _globals['_ACTIVITYREQUEST']._serialized_end=511 + _globals['_ACTIVITYRESPONSE']._serialized_start=514 + _globals['_ACTIVITYRESPONSE']._serialized_end=659 + _globals['_TASKFAILUREDETAILS']._serialized_start=662 + _globals['_TASKFAILUREDETAILS']._serialized_end=840 + _globals['_PARENTINSTANCEINFO']._serialized_start=843 + _globals['_PARENTINSTANCEINFO']._serialized_end=1034 + _globals['_TRACECONTEXT']._serialized_start=1036 + _globals['_TRACECONTEXT']._serialized_end=1141 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1144 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1536 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1539 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1706 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1708 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1796 + _globals['_TASKSCHEDULEDEVENT']._serialized_start=1799 + _globals['_TASKSCHEDULEDEVENT']._serialized_end=1968 + _globals['_TASKCOMPLETEDEVENT']._serialized_start=1970 + _globals['_TASKCOMPLETEDEVENT']._serialized_end=2061 + _globals['_TASKFAILEDEVENT']._serialized_start=2063 + _globals['_TASKFAILEDEVENT']._serialized_end=2150 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2153 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2360 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2362 + 
_globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2473 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2475 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2582 + _globals['_TIMERCREATEDEVENT']._serialized_start=2584 + _globals['_TIMERCREATEDEVENT']._serialized_end=2647 + _globals['_TIMERFIREDEVENT']._serialized_start=2649 + _globals['_TIMERFIREDEVENT']._serialized_end=2727 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2729 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2755 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2757 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2785 + _globals['_EVENTSENTEVENT']._serialized_start=2787 + _globals['_EVENTSENTEVENT']._serialized_end=2882 + _globals['_EVENTRAISEDEVENT']._serialized_start=2884 + _globals['_EVENTRAISEDEVENT']._serialized_end=2961 + _globals['_GENERICEVENT']._serialized_start=2963 + _globals['_GENERICEVENT']._serialized_end=3021 + _globals['_HISTORYSTATEEVENT']._serialized_start=3023 + _globals['_HISTORYSTATEEVENT']._serialized_end=3091 + _globals['_CONTINUEASNEWEVENT']._serialized_start=3093 + _globals['_CONTINUEASNEWEVENT']._serialized_end=3158 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3160 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3230 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3232 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3300 + _globals['_HISTORYEVENT']._serialized_start=3303 + _globals['_HISTORYEVENT']._serialized_end=4461 + _globals['_SCHEDULETASKACTION']._serialized_start=4463 + _globals['_SCHEDULETASKACTION']._serialized_end=4589 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4592 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4748 + _globals['_CREATETIMERACTION']._serialized_start=4750 + _globals['_CREATETIMERACTION']._serialized_end=4813 + _globals['_SENDEVENTACTION']._serialized_start=4815 + 
_globals['_SENDEVENTACTION']._serialized_end=4932 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4935 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5243 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5245 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5358 + _globals['_ORCHESTRATORACTION']._serialized_start=5361 + _globals['_ORCHESTRATORACTION']._serialized_end=5739 + _globals['_ORCHESTRATORREQUEST']._serialized_start=5742 + _globals['_ORCHESTRATORREQUEST']._serialized_end=5960 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=5963 + _globals['_ORCHESTRATORRESPONSE']._serialized_end=6095 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=6098 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=6517 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6474 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6517 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6519 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6638 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=6640 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=6684 + _globals['_GETINSTANCEREQUEST']._serialized_start=6686 + _globals['_GETINSTANCEREQUEST']._serialized_end=6755 + _globals['_GETINSTANCERESPONSE']._serialized_start=6757 + _globals['_GETINSTANCERESPONSE']._serialized_end=6843 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=6845 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=6934 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=6936 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=6960 + _globals['_ORCHESTRATIONSTATE']._serialized_start=6963 + _globals['_ORCHESTRATIONSTATE']._serialized_end=7639 + _globals['_RAISEEVENTREQUEST']._serialized_start=7641 + _globals['_RAISEEVENTREQUEST']._serialized_end=7739 + _globals['_RAISEEVENTRESPONSE']._serialized_start=7741 + _globals['_RAISEEVENTRESPONSE']._serialized_end=7761 + 
_globals['_TERMINATEREQUEST']._serialized_start=7763 + _globals['_TERMINATEREQUEST']._serialized_end=7866 + _globals['_TERMINATERESPONSE']._serialized_start=7868 + _globals['_TERMINATERESPONSE']._serialized_end=7887 + _globals['_SUSPENDREQUEST']._serialized_start=7889 + _globals['_SUSPENDREQUEST']._serialized_end=7971 + _globals['_SUSPENDRESPONSE']._serialized_start=7973 + _globals['_SUSPENDRESPONSE']._serialized_end=7990 + _globals['_RESUMEREQUEST']._serialized_start=7992 + _globals['_RESUMEREQUEST']._serialized_end=8073 + _globals['_RESUMERESPONSE']._serialized_start=8075 + _globals['_RESUMERESPONSE']._serialized_end=8091 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=8093 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=8147 + _globals['_INSTANCEQUERY']._serialized_start=8150 + _globals['_INSTANCEQUERY']._serialized_end=8536 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8539 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8669 + _globals['_PURGEINSTANCESREQUEST']._serialized_start=8672 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=8800 + _globals['_PURGEINSTANCEFILTER']._serialized_start=8803 + _globals['_PURGEINSTANCEFILTER']._serialized_end=8973 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8975 + _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9029 + _globals['_CREATETASKHUBREQUEST']._serialized_start=9031 + _globals['_CREATETASKHUBREQUEST']._serialized_end=9079 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=9081 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=9104 + _globals['_DELETETASKHUBREQUEST']._serialized_start=9106 + _globals['_DELETETASKHUBREQUEST']._serialized_end=9128 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=9130 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=9153 + _globals['_SIGNALENTITYREQUEST']._serialized_start=9156 + _globals['_SIGNALENTITYREQUEST']._serialized_end=9326 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=9328 + 
_globals['_SIGNALENTITYRESPONSE']._serialized_end=9350 + _globals['_GETENTITYREQUEST']._serialized_start=9352 + _globals['_GETENTITYREQUEST']._serialized_end=9412 + _globals['_GETENTITYRESPONSE']._serialized_start=9414 + _globals['_GETENTITYRESPONSE']._serialized_end=9482 + _globals['_ENTITYQUERY']._serialized_start=9485 + _globals['_ENTITYQUERY']._serialized_end=9816 + _globals['_QUERYENTITIESREQUEST']._serialized_start=9818 + _globals['_QUERYENTITIESREQUEST']._serialized_end=9869 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=9871 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=9986 + _globals['_ENTITYMETADATA']._serialized_start=9989 + _globals['_ENTITYMETADATA']._serialized_end=10208 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10211 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10354 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10357 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10503 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10505 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10598 + _globals['_ENTITYBATCHREQUEST']._serialized_start=10601 + _globals['_ENTITYBATCHREQUEST']._serialized_end=10731 + _globals['_ENTITYBATCHRESULT']._serialized_start=10734 + _globals['_ENTITYBATCHRESULT']._serialized_end=10919 + _globals['_OPERATIONREQUEST']._serialized_start=10921 + _globals['_OPERATIONREQUEST']._serialized_end=11022 + _globals['_OPERATIONRESULT']._serialized_start=11024 + _globals['_OPERATIONRESULT']._serialized_end=11143 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11145 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11215 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=11217 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=11286 + _globals['_OPERATIONACTION']._serialized_start=11289 + _globals['_OPERATIONACTION']._serialized_end=11445 + _globals['_SENDSIGNALACTION']._serialized_start=11448 + 
_globals['_SENDSIGNALACTION']._serialized_end=11596 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11599 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11805 + _globals['_GETWORKITEMSREQUEST']._serialized_start=11807 + _globals['_GETWORKITEMSREQUEST']._serialized_end=11828 + _globals['_WORKITEM']._serialized_start=11831 + _globals['_WORKITEM']._serialized_end=12056 + _globals['_COMPLETETASKRESPONSE']._serialized_start=12058 + _globals['_COMPLETETASKRESPONSE']._serialized_end=12080 + _globals['_HEALTHPING']._serialized_start=12082 + _globals['_HEALTHPING']._serialized_end=12094 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12476 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13880 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2_grpc.py b/durabletask/internal/orchestrator_service_pb2_grpc.py index f11cf4b..3638bf6 100644 --- a/durabletask/internal/orchestrator_service_pb2_grpc.py +++ b/durabletask/internal/orchestrator_service_pb2_grpc.py @@ -1,32 +1,10 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc -import warnings +from durabletask.internal import orchestrator_service_pb2 as durabletask_dot_internal_dot_orchestrator__service__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -# TODO: This is a manual edit. Need to figure out how to not manually edit this file. 
-import durabletask.internal.orchestrator_service_pb2 as orchestrator__service__pb2 - -GRPC_GENERATED_VERSION = '1.67.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in orchestrator_service_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' - + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' - ) - class TaskHubSidecarServiceStub(object): """Missing associated documentation comment in .proto file.""" @@ -41,112 +19,112 @@ def __init__(self, channel): '/TaskHubSidecarService/Hello', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - _registered_method=True) + ) self.StartInstance = channel.unary_unary( '/TaskHubSidecarService/StartInstance', - request_serializer=orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CreateInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, + ) self.GetInstance = channel.unary_unary( '/TaskHubSidecarService/GetInstance', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, - _registered_method=True) + 
request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + ) self.RewindInstance = channel.unary_unary( '/TaskHubSidecarService/RewindInstance', - request_serializer=orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.RewindInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, + ) self.WaitForInstanceStart = channel.unary_unary( '/TaskHubSidecarService/WaitForInstanceStart', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + ) self.WaitForInstanceCompletion = channel.unary_unary( '/TaskHubSidecarService/WaitForInstanceCompletion', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + ) self.RaiseEvent = channel.unary_unary( '/TaskHubSidecarService/RaiseEvent', - request_serializer=orchestrator__service__pb2.RaiseEventRequest.SerializeToString, - 
response_deserializer=orchestrator__service__pb2.RaiseEventResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, + ) self.TerminateInstance = channel.unary_unary( '/TaskHubSidecarService/TerminateInstance', - request_serializer=orchestrator__service__pb2.TerminateRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.TerminateResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, + ) self.SuspendInstance = channel.unary_unary( '/TaskHubSidecarService/SuspendInstance', - request_serializer=orchestrator__service__pb2.SuspendRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.SuspendResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, + ) self.ResumeInstance = channel.unary_unary( '/TaskHubSidecarService/ResumeInstance', - request_serializer=orchestrator__service__pb2.ResumeRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.ResumeResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, + ) self.QueryInstances = channel.unary_unary( '/TaskHubSidecarService/QueryInstances', - 
request_serializer=orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.QueryInstancesResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, + ) self.PurgeInstances = channel.unary_unary( '/TaskHubSidecarService/PurgeInstances', - request_serializer=orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.PurgeInstancesResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, + ) self.GetWorkItems = channel.unary_stream( '/TaskHubSidecarService/GetWorkItems', - request_serializer=orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.WorkItem.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, + ) self.CompleteActivityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteActivityTask', - request_serializer=orchestrator__service__pb2.ActivityResponse.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + ) self.CompleteOrchestratorTask 
= channel.unary_unary( '/TaskHubSidecarService/CompleteOrchestratorTask', - request_serializer=orchestrator__service__pb2.OrchestratorResponse.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + ) self.CompleteEntityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteEntityTask', - request_serializer=orchestrator__service__pb2.EntityBatchResult.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + ) self.CreateTaskHub = channel.unary_unary( '/TaskHubSidecarService/CreateTaskHub', - request_serializer=orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CreateTaskHubResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, + ) self.DeleteTaskHub = channel.unary_unary( '/TaskHubSidecarService/DeleteTaskHub', - request_serializer=orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.DeleteTaskHubResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, + 
response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, + ) self.SignalEntity = channel.unary_unary( '/TaskHubSidecarService/SignalEntity', - request_serializer=orchestrator__service__pb2.SignalEntityRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.SignalEntityResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, + ) self.GetEntity = channel.unary_unary( '/TaskHubSidecarService/GetEntity', - request_serializer=orchestrator__service__pb2.GetEntityRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetEntityResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, + ) self.QueryEntities = channel.unary_unary( '/TaskHubSidecarService/QueryEntities', - request_serializer=orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.QueryEntitiesResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, + ) self.CleanEntityStorage = channel.unary_unary( '/TaskHubSidecarService/CleanEntityStorage', - request_serializer=orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CleanEntityStorageResponse.FromString, - _registered_method=True) + 
request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + ) class TaskHubSidecarServiceServicer(object): @@ -312,114 +290,113 @@ def add_TaskHubSidecarServiceServicer_to_server(servicer, server): ), 'StartInstance': grpc.unary_unary_rpc_method_handler( servicer.StartInstance, - request_deserializer=orchestrator__service__pb2.CreateInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.CreateInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.SerializeToString, ), 'GetInstance': grpc.unary_unary_rpc_method_handler( servicer.GetInstance, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'RewindInstance': grpc.unary_unary_rpc_method_handler( servicer.RewindInstance, - request_deserializer=orchestrator__service__pb2.RewindInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.RewindInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.SerializeToString, ), 'WaitForInstanceStart': grpc.unary_unary_rpc_method_handler( servicer.WaitForInstanceStart, - 
request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'WaitForInstanceCompletion': grpc.unary_unary_rpc_method_handler( servicer.WaitForInstanceCompletion, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'RaiseEvent': grpc.unary_unary_rpc_method_handler( servicer.RaiseEvent, - request_deserializer=orchestrator__service__pb2.RaiseEventRequest.FromString, - response_serializer=orchestrator__service__pb2.RaiseEventResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.SerializeToString, ), 'TerminateInstance': grpc.unary_unary_rpc_method_handler( servicer.TerminateInstance, - request_deserializer=orchestrator__service__pb2.TerminateRequest.FromString, - response_serializer=orchestrator__service__pb2.TerminateResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.SerializeToString, ), 'SuspendInstance': grpc.unary_unary_rpc_method_handler( servicer.SuspendInstance, - 
request_deserializer=orchestrator__service__pb2.SuspendRequest.FromString, - response_serializer=orchestrator__service__pb2.SuspendResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.SerializeToString, ), 'ResumeInstance': grpc.unary_unary_rpc_method_handler( servicer.ResumeInstance, - request_deserializer=orchestrator__service__pb2.ResumeRequest.FromString, - response_serializer=orchestrator__service__pb2.ResumeResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.SerializeToString, ), 'QueryInstances': grpc.unary_unary_rpc_method_handler( servicer.QueryInstances, - request_deserializer=orchestrator__service__pb2.QueryInstancesRequest.FromString, - response_serializer=orchestrator__service__pb2.QueryInstancesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.SerializeToString, ), 'PurgeInstances': grpc.unary_unary_rpc_method_handler( servicer.PurgeInstances, - request_deserializer=orchestrator__service__pb2.PurgeInstancesRequest.FromString, - response_serializer=orchestrator__service__pb2.PurgeInstancesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.SerializeToString, ), 'GetWorkItems': grpc.unary_stream_rpc_method_handler( servicer.GetWorkItems, - request_deserializer=orchestrator__service__pb2.GetWorkItemsRequest.FromString, - 
response_serializer=orchestrator__service__pb2.WorkItem.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.SerializeToString, ), 'CompleteActivityTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteActivityTask, - request_deserializer=orchestrator__service__pb2.ActivityResponse.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CompleteOrchestratorTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteOrchestratorTask, - request_deserializer=orchestrator__service__pb2.OrchestratorResponse.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CompleteEntityTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteEntityTask, - request_deserializer=orchestrator__service__pb2.EntityBatchResult.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CreateTaskHub': grpc.unary_unary_rpc_method_handler( servicer.CreateTaskHub, - request_deserializer=orchestrator__service__pb2.CreateTaskHubRequest.FromString, - 
response_serializer=orchestrator__service__pb2.CreateTaskHubResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.SerializeToString, ), 'DeleteTaskHub': grpc.unary_unary_rpc_method_handler( servicer.DeleteTaskHub, - request_deserializer=orchestrator__service__pb2.DeleteTaskHubRequest.FromString, - response_serializer=orchestrator__service__pb2.DeleteTaskHubResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.SerializeToString, ), 'SignalEntity': grpc.unary_unary_rpc_method_handler( servicer.SignalEntity, - request_deserializer=orchestrator__service__pb2.SignalEntityRequest.FromString, - response_serializer=orchestrator__service__pb2.SignalEntityResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.SerializeToString, ), 'GetEntity': grpc.unary_unary_rpc_method_handler( servicer.GetEntity, - request_deserializer=orchestrator__service__pb2.GetEntityRequest.FromString, - response_serializer=orchestrator__service__pb2.GetEntityResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.SerializeToString, ), 'QueryEntities': grpc.unary_unary_rpc_method_handler( servicer.QueryEntities, - request_deserializer=orchestrator__service__pb2.QueryEntitiesRequest.FromString, - response_serializer=orchestrator__service__pb2.QueryEntitiesResponse.SerializeToString, + 
request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.SerializeToString, ), 'CleanEntityStorage': grpc.unary_unary_rpc_method_handler( servicer.CleanEntityStorage, - request_deserializer=orchestrator__service__pb2.CleanEntityStorageRequest.FromString, - response_serializer=orchestrator__service__pb2.CleanEntityStorageResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'TaskHubSidecarService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('TaskHubSidecarService', rpc_method_handlers) # This class is part of an EXPERIMENTAL API. 
@@ -437,21 +414,11 @@ def Hello(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/Hello', + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/Hello', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def StartInstance(request, @@ -464,21 +431,11 @@ def StartInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/StartInstance', - orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, - orchestrator__service__pb2.CreateInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/StartInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetInstance(request, @@ -491,21 +448,11 @@ def GetInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/GetInstance', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, - options, - channel_credentials, - insecure, - 
call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/GetInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def RewindInstance(request, @@ -518,21 +465,11 @@ def RewindInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/RewindInstance', - orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, - orchestrator__service__pb2.RewindInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/RewindInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def WaitForInstanceStart(request, @@ -545,21 +482,11 @@ def WaitForInstanceStart(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/WaitForInstanceStart', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return 
grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/WaitForInstanceStart', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def WaitForInstanceCompletion(request, @@ -572,21 +499,11 @@ def WaitForInstanceCompletion(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/WaitForInstanceCompletion', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/WaitForInstanceCompletion', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def RaiseEvent(request, @@ -599,21 +516,11 @@ def RaiseEvent(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/RaiseEvent', - orchestrator__service__pb2.RaiseEventRequest.SerializeToString, - orchestrator__service__pb2.RaiseEventResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/RaiseEvent', + 
durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def TerminateInstance(request, @@ -626,21 +533,11 @@ def TerminateInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/TerminateInstance', - orchestrator__service__pb2.TerminateRequest.SerializeToString, - orchestrator__service__pb2.TerminateResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/TerminateInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def SuspendInstance(request, @@ -653,21 +550,11 @@ def SuspendInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/SuspendInstance', - orchestrator__service__pb2.SuspendRequest.SerializeToString, - orchestrator__service__pb2.SuspendResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/SuspendInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, + 
durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ResumeInstance(request, @@ -680,21 +567,11 @@ def ResumeInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/ResumeInstance', - orchestrator__service__pb2.ResumeRequest.SerializeToString, - orchestrator__service__pb2.ResumeResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/ResumeInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def QueryInstances(request, @@ -707,21 +584,11 @@ def QueryInstances(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/QueryInstances', - orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, - orchestrator__service__pb2.QueryInstancesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/QueryInstances', + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, 
wait_for_ready, timeout, metadata) @staticmethod def PurgeInstances(request, @@ -734,21 +601,11 @@ def PurgeInstances(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/PurgeInstances', - orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, - orchestrator__service__pb2.PurgeInstancesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/PurgeInstances', + durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetWorkItems(request, @@ -761,21 +618,11 @@ def GetWorkItems(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/TaskHubSidecarService/GetWorkItems', - orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, - orchestrator__service__pb2.WorkItem.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_stream(request, target, '/TaskHubSidecarService/GetWorkItems', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CompleteActivityTask(request, @@ -788,21 +635,11 @@ def CompleteActivityTask(request, wait_for_ready=None, timeout=None, 
metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CompleteActivityTask', - orchestrator__service__pb2.ActivityResponse.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteActivityTask', + durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CompleteOrchestratorTask(request, @@ -815,21 +652,11 @@ def CompleteOrchestratorTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CompleteOrchestratorTask', - orchestrator__service__pb2.OrchestratorResponse.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteOrchestratorTask', + durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CompleteEntityTask(request, @@ -842,21 +669,11 @@ def CompleteEntityTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - 
'/TaskHubSidecarService/CompleteEntityTask', - orchestrator__service__pb2.EntityBatchResult.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteEntityTask', + durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CreateTaskHub(request, @@ -869,21 +686,11 @@ def CreateTaskHub(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CreateTaskHub', - orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, - orchestrator__service__pb2.CreateTaskHubResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CreateTaskHub', + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteTaskHub(request, @@ -896,21 +703,11 @@ def DeleteTaskHub(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/DeleteTaskHub', - orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, - 
orchestrator__service__pb2.DeleteTaskHubResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/DeleteTaskHub', + durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def SignalEntity(request, @@ -923,21 +720,11 @@ def SignalEntity(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/SignalEntity', - orchestrator__service__pb2.SignalEntityRequest.SerializeToString, - orchestrator__service__pb2.SignalEntityResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/SignalEntity', + durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetEntity(request, @@ -950,21 +737,11 @@ def GetEntity(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/GetEntity', - orchestrator__service__pb2.GetEntityRequest.SerializeToString, - orchestrator__service__pb2.GetEntityResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - 
_registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/GetEntity', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def QueryEntities(request, @@ -977,21 +754,11 @@ def QueryEntities(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/QueryEntities', - orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, - orchestrator__service__pb2.QueryEntitiesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/QueryEntities', + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CleanEntityStorage(request, @@ -1004,18 +771,8 @@ def CleanEntityStorage(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CleanEntityStorage', - orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, - orchestrator__service__pb2.CleanEntityStorageResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CleanEntityStorage', + 
durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 80c3d56..400529a 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -5,7 +5,7 @@ import json import logging from types import SimpleNamespace -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Optional import grpc @@ -20,7 +20,10 @@ def get_default_host_address() -> str: return "localhost:4001" -def get_grpc_channel(host_address: Union[str, None], metadata: Union[List[Tuple[str, str]], None], secure_channel: bool = False) -> grpc.Channel: +def get_grpc_channel( + host_address: Optional[str], + metadata: Optional[list[tuple[str, str]]], + secure_channel: bool = False) -> grpc.Channel: if host_address is None: host_address = get_default_host_address() @@ -36,8 +39,8 @@ def get_grpc_channel(host_address: Union[str, None], metadata: Union[List[Tuple[ def get_logger( name_suffix: str, - log_handler: Union[logging.Handler, None] = None, - log_formatter: Union[logging.Formatter, None] = None) -> logging.Logger: + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None) -> logging.Logger: logger = logging.Logger(f"durabletask-{name_suffix}") # Add a default log handler if none is provided @@ -78,7 +81,7 @@ def default(self, obj): if dataclasses.is_dataclass(obj): # Dataclasses are not serializable by default, so we convert them to a dict and mark them for # automatic deserialization by the receiver - d = dataclasses.asdict(obj) + d = dataclasses.asdict(obj) # type: ignore d[AUTO_SERIALIZED] = True return d elif isinstance(obj, SimpleNamespace): @@ -94,7 +97,7 @@ class 
InternalJSONDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): super().__init__(object_hook=self.dict_to_object, *args, **kwargs) - def dict_to_object(self, d: Dict[str, Any]): + def dict_to_object(self, d: dict[str, Any]): # If the object was serialized by the InternalJSONEncoder, deserialize it as a SimpleNamespace if d.pop(AUTO_SERIALIZED, False): return SimpleNamespace(**d) diff --git a/durabletask/task.py b/durabletask/task.py index a9f85de..a40602b 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -7,8 +7,7 @@ import math from abc import ABC, abstractmethod from datetime import datetime, timedelta -from typing import (Any, Callable, Generator, Generic, List, Optional, TypeVar, - Union) +from typing import Any, Callable, Generator, Generic, Optional, TypeVar, Union import durabletask.internal.helpers as pbh import durabletask.internal.orchestrator_service_pb2 as pb @@ -72,8 +71,13 @@ def is_replaying(self) -> bool: pass @abstractmethod - def set_custom_status(self, custom_status: str) -> None: - """Set the custom status. + def set_custom_status(self, custom_status: Any) -> None: + """Set the orchestration instance's custom status. + + Parameters + ---------- + custom_status: Any + A JSON-serializable custom status value to set. 
""" pass @@ -254,9 +258,9 @@ def get_exception(self) -> TaskFailedError: class CompositeTask(Task[T]): """A task that is composed of other tasks.""" - _tasks: List[Task] + _tasks: list[Task] - def __init__(self, tasks: List[Task]): + def __init__(self, tasks: list[Task]): super().__init__() self._tasks = tasks self._completed_tasks = 0 @@ -266,17 +270,17 @@ def __init__(self, tasks: List[Task]): if task.is_complete: self.on_child_completed(task) - def get_tasks(self) -> List[Task]: + def get_tasks(self) -> list[Task]: return self._tasks @abstractmethod def on_child_completed(self, task: Task[T]): pass -class WhenAllTask(CompositeTask[List[T]]): +class WhenAllTask(CompositeTask[list[T]]): """A task that completes when all of its child tasks complete.""" - def __init__(self, tasks: List[Task[T]]): + def __init__(self, tasks: list[Task[T]]): super().__init__(tasks) self._completed_tasks = 0 self._failed_tasks = 0 @@ -340,7 +344,7 @@ def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, def increment_attempt_count(self) -> None: self._attempt_count += 1 - def compute_next_delay(self) -> Union[timedelta, None]: + def compute_next_delay(self) -> Optional[timedelta]: if self._attempt_count >= self._retry_policy.max_number_of_attempts: return None @@ -375,7 +379,7 @@ def set_retryable_parent(self, retryable_task: RetryableTask): class WhenAnyTask(CompositeTask[Task]): """A task that completes when any of its child tasks complete.""" - def __init__(self, tasks: List[Task]): + def __init__(self, tasks: list[Task]): super().__init__(tasks) def on_child_completed(self, task: Task): @@ -385,12 +389,12 @@ def on_child_completed(self, task: Task): self._result = task -def when_all(tasks: List[Task[T]]) -> WhenAllTask[T]: +def when_all(tasks: list[Task[T]]) -> WhenAllTask[T]: """Returns a task that completes when all of the provided tasks complete or when one of the tasks fail.""" return WhenAllTask(tasks) -def when_any(tasks: List[Task]) -> WhenAnyTask: 
+def when_any(tasks: list[Task]) -> WhenAnyTask: """Returns a task that completes when any of the provided tasks complete or fail.""" return WhenAnyTask(tasks) diff --git a/durabletask/worker.py b/durabletask/worker.py index bcc1a30..75e2e37 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -6,8 +6,7 @@ from datetime import datetime, timedelta from threading import Event, Thread from types import GeneratorType -from typing import (Any, Dict, Generator, List, Optional, Sequence, Tuple, - TypeVar, Union) +from typing import Any, Generator, Optional, Sequence, TypeVar, Union import grpc from google.protobuf import empty_pb2, wrappers_pb2 @@ -25,8 +24,8 @@ class _Registry: - orchestrators: Dict[str, task.Orchestrator] - activities: Dict[str, task.Activity] + orchestrators: dict[str, task.Orchestrator] + activities: dict[str, task.Activity] def __init__(self): self.orchestrators = {} @@ -86,7 +85,7 @@ class TaskHubGrpcWorker: def __init__(self, *, host_address: Optional[str] = None, - metadata: Optional[List[Tuple[str, str]]] = None, + metadata: Optional[list[tuple[str, str]]] = None, log_handler=None, log_formatter: Optional[logging.Formatter] = None, secure_channel: bool = False): @@ -140,7 +139,7 @@ def run_loop(): # The stream blocks until either a work item is received or the stream is canceled # by another thread (see the stop() method). 
- for work_item in self._response_stream: + for work_item in self._response_stream: # type: ignore request_type = work_item.WhichOneof('request') self._logger.debug(f'Received "{request_type}" work item') if work_item.HasField('orchestratorRequest'): @@ -189,7 +188,10 @@ def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHub try: executor = _OrchestrationExecutor(self._registry, self._logger) result = executor.execute(req.instanceId, req.pastEvents, req.newEvents) - res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=result.actions, customStatus=wrappers_pb2.StringValue(value=result.custom_status)) + res = pb.OrchestratorResponse( + instanceId=req.instanceId, + actions=result.actions, + customStatus=pbh.get_string_value(result.encoded_custom_status)) except Exception as ex: self._logger.exception(f"An error occurred while trying to execute instance '{req.instanceId}': {ex}") failure_details = pbh.new_failure_details(ex) @@ -232,17 +234,17 @@ def __init__(self, instance_id: str): self._is_replaying = True self._is_complete = False self._result = None - self._pending_actions: Dict[int, pb.OrchestratorAction] = {} - self._pending_tasks: Dict[int, task.CompletableTask] = {} + self._pending_actions: dict[int, pb.OrchestratorAction] = {} + self._pending_tasks: dict[int, task.CompletableTask] = {} self._sequence_number = 0 self._current_utc_datetime = datetime(1000, 1, 1) self._instance_id = instance_id self._completion_status: Optional[pb.OrchestrationStatus] = None - self._received_events: Dict[str, List[Any]] = {} - self._pending_events: Dict[str, List[task.CompletableTask]] = {} + self._received_events: dict[str, list[Any]] = {} + self._pending_events: dict[str, list[task.CompletableTask]] = {} self._new_input: Optional[Any] = None self._save_events = False - self._custom_status: str = "" + self._encoded_custom_status: Optional[str] = None def run(self, generator: Generator[task.Task, Any, Any]): self._generator = generator @@ 
-314,10 +316,10 @@ def set_continued_as_new(self, new_input: Any, save_events: bool): self._new_input = new_input self._save_events = save_events - def get_actions(self) -> List[pb.OrchestratorAction]: + def get_actions(self) -> list[pb.OrchestratorAction]: if self._completion_status == pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW: # When continuing-as-new, we only return a single completion action. - carryover_events: Optional[List[pb.HistoryEvent]] = None + carryover_events: Optional[list[pb.HistoryEvent]] = None if self._save_events: carryover_events = [] # We need to save the current set of pending events so that they can be @@ -356,8 +358,8 @@ def is_replaying(self) -> bool: def current_utc_datetime(self, value: datetime): self._current_utc_datetime = value - def set_custom_status(self, custom_status: str) -> None: - self._custom_status = custom_status + def set_custom_status(self, custom_status: Any) -> None: + self._encoded_custom_status = shared.to_json(custom_status) if custom_status is not None else None def create_timer(self, fire_at: Union[datetime, timedelta]) -> task.Task: return self.create_timer_internal(fire_at) @@ -462,12 +464,12 @@ def continue_as_new(self, new_input, *, save_events: bool = False) -> None: class ExecutionResults: - actions: List[pb.OrchestratorAction] - custom_status: str + actions: list[pb.OrchestratorAction] + encoded_custom_status: Optional[str] - def __init__(self, actions: List[pb.OrchestratorAction], custom_status: str): + def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str]): self.actions = actions - self.custom_status = custom_status + self.encoded_custom_status = encoded_custom_status class _OrchestrationExecutor: _generator: Optional[task.Orchestrator] = None @@ -476,7 +478,7 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._registry = registry self._logger = logger self._is_suspended = False - self._suspended_events: List[pb.HistoryEvent] = [] + 
self._suspended_events: list[pb.HistoryEvent] = [] def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_events: Sequence[pb.HistoryEvent]) -> ExecutionResults: if not new_events: @@ -513,7 +515,7 @@ def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_e actions = ctx.get_actions() if self._logger.level <= logging.DEBUG: self._logger.debug(f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}") - return ExecutionResults(actions=actions, custom_status=ctx._custom_status) + return ExecutionResults(actions=actions, encoded_custom_status=ctx._encoded_custom_status) def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent) -> None: if self._is_suspended and _is_suspendable(event): @@ -829,7 +831,7 @@ def _get_new_event_summary(new_events: Sequence[pb.HistoryEvent]) -> str: elif len(new_events) == 1: return f"[{new_events[0].WhichOneof('eventType')}]" else: - counts: Dict[str, int] = {} + counts: dict[str, int] = {} for event in new_events: event_type = event.WhichOneof('eventType') counts[event_type] = counts.get(event_type, 0) + 1 @@ -843,7 +845,7 @@ def _get_action_summary(new_actions: Sequence[pb.OrchestratorAction]) -> str: elif len(new_actions) == 1: return f"[{new_actions[0].WhichOneof('orchestratorActionType')}]" else: - counts: Dict[str, int] = {} + counts: dict[str, int] = {} for action in new_actions: action_type = action.WhichOneof('orchestratorActionType') counts[action_type] = counts.get(action_type, 0) + 1 diff --git a/examples/fanout_fanin.py b/examples/fanout_fanin.py index 3e054df..c53744f 100644 --- a/examples/fanout_fanin.py +++ b/examples/fanout_fanin.py @@ -3,12 +3,11 @@ to complete, and prints an aggregate summary of the outputs.""" import random import time -from typing import List from durabletask import client, task, worker -def get_work_items(ctx: task.ActivityContext, _) -> List[str]: +def get_work_items(ctx: task.ActivityContext, _) -> 
list[str]: """Activity function that returns a list of work items""" # return a random number of work items count = random.randint(2, 10) @@ -32,11 +31,11 @@ def orchestrator(ctx: task.OrchestrationContext, _): activity functions in parallel, waits for them all to complete, and prints an aggregate summary of the outputs""" - work_items: List[str] = yield ctx.call_activity(get_work_items) + work_items: list[str] = yield ctx.call_activity(get_work_items) # execute the work-items in parallel and wait for them all to return tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items] - results: List[int] = yield task.when_all(tasks) + results: list[int] = yield task.when_all(tasks) # return an aggregate summary of the results return { diff --git a/pyproject.toml b/pyproject.toml index ed94136..9c05d86 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,7 @@ classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", ] -requires-python = ">=3.8" +requires-python = ">=3.9" license = {file = "LICENSE"} readme = "README.md" dependencies = [ diff --git a/requirements.txt b/requirements.txt index 641cee7..a31419b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ autopep8 -grpcio -grpcio-tools +grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible +protobuf pytest -pytest-cov \ No newline at end of file +pytest-cov diff --git a/tests/test_activity_executor.py b/tests/test_activity_executor.py index b9a4bd4..bfc8eaf 100644 --- a/tests/test_activity_executor.py +++ b/tests/test_activity_executor.py @@ -3,7 +3,7 @@ import json import logging -from typing import Any, Tuple, Union +from typing import Any, Optional, Tuple from durabletask import task, worker @@ -40,7 +40,7 @@ def test_activity(ctx: task.ActivityContext, _): executor, _ = _get_activity_executor(test_activity) - caught_exception: Union[Exception, None] = None + 
caught_exception: Optional[Exception] = None try: executor.execute(TEST_INSTANCE_ID, "Bogus", TEST_TASK_ID, None) except Exception as ex: diff --git a/tests/test_orchestration_e2e.py b/tests/test_orchestration_e2e.py index 1cfc520..d3d7f0b 100644 --- a/tests/test_orchestration_e2e.py +++ b/tests/test_orchestration_e2e.py @@ -466,4 +466,4 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.COMPLETED assert state.serialized_input is None assert state.serialized_output is None - assert state.serialized_custom_status is "\"foobaz\"" + assert state.serialized_custom_status == "\"foobaz\"" diff --git a/tests/test_orchestration_executor.py b/tests/test_orchestration_executor.py index 95eab0b..cb77c81 100644 --- a/tests/test_orchestration_executor.py +++ b/tests/test_orchestration_executor.py @@ -4,7 +4,6 @@ import json import logging from datetime import datetime, timedelta -from typing import List import pytest @@ -1184,7 +1183,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert str(ex) in complete_action.failureDetails.errorMessage -def get_and_validate_single_complete_orchestration_action(actions: List[pb.OrchestratorAction]) -> pb.CompleteOrchestrationAction: +def get_and_validate_single_complete_orchestration_action(actions: list[pb.OrchestratorAction]) -> pb.CompleteOrchestrationAction: assert len(actions) == 1 assert type(actions[0]) is pb.OrchestratorAction assert actions[0].HasField("completeOrchestration") From 6ef52fc2b0103c0903ae24b75330af4f8c40fce6 Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Mon, 13 Jan 2025 16:29:54 +0000 Subject: [PATCH 05/81] sync from upstream (#7) Signed-off-by: Elena Kolevska Signed-off-by: Elena Kolevska --- CHANGELOG.md | 3 ++- README.md | 2 +- durabletask/internal/shared.py | 17 ++++++++++++ tests/test_client.py | 49 +++++++++++++++++++++++++++++++++- 4 files changed, 68 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
a09078d..286312c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changes -- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries. +- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries ([#36](https://github.com/microsoft/durabletask-python/pull/36)) - by [@berndverst](https://github.com/berndverst) +- Http and grpc protocols and their secure variants are stripped from the host name parameter if provided. Secure mode is enabled if the protocol provided is https or grpcs ([#38](https://github.com/microsoft/durabletask-python/pull/38) - by [@berndverst)(https://github.com/berndverst) ### Updates diff --git a/README.md b/README.md index 443ea99..94be7ec 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ Orchestrations can specify retry policies for activities and sub-orchestrations. 
### Prerequisites -- Python 3.8 +- Python 3.9 - A Durable Task-compatible sidecar, like [Dapr Workflow](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/) ### Installing the Durable Task Python client SDK diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 400529a..c4f3aa4 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -15,6 +15,9 @@ # and should be deserialized as a SimpleNamespace AUTO_SERIALIZED = "__durabletask_autoobject__" +SECURE_PROTOCOLS = ["https://", "grpcs://"] +INSECURE_PROTOCOLS = ["http://", "grpc://"] + def get_default_host_address() -> str: return "localhost:4001" @@ -27,6 +30,20 @@ def get_grpc_channel( if host_address is None: host_address = get_default_host_address() + for protocol in SECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = True + # remove the protocol from the host name + host_address = host_address[len(protocol):] + break + + for protocol in INSECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = False + # remove the protocol from the host name + host_address = host_address[len(protocol):] + break + if secure_channel: channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) else: diff --git a/tests/test_client.py b/tests/test_client.py index b27f8e3..caacf65 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,4 +1,4 @@ -from unittest.mock import patch +from unittest.mock import patch, ANY from durabletask.internal.shared import (DefaultClientInterceptorImpl, get_default_host_address, @@ -39,3 +39,50 @@ def test_get_grpc_channel_with_metadata(): assert args[0] == mock_channel.return_value assert isinstance(args[1], DefaultClientInterceptorImpl) assert args[1]._metadata == METADATA + + +def test_grpc_channel_with_host_name_protocol_stripping(): + with patch('grpc.insecure_channel') as mock_insecure_channel, patch( + 
'grpc.secure_channel') as mock_secure_channel: + + host_name = "myserver.com:1234" + + prefix = "grpc://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "http://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "HTTP://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "GRPC://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "grpcs://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "https://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "HTTPS://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "GRPCS://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "" + get_grpc_channel(prefix + host_name, METADATA, True) + mock_secure_channel.assert_called_with(host_name, ANY) \ No newline at end of file From 1982ca4a76894033be3c8060b0574ea29b84d3b0 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Fri, 17 Jan 2025 09:39:03 -0800 Subject: [PATCH 06/81] Improve Proto Generation: Download proto file directly instead of via submodule (#39) Signed-off-by: Elena Kolevska --- CHANGELOG.md | 1 + Makefile | 3 +- README.md | 10 +- durabletask/internal/PROTO_SOURCE_COMMIT_HASH | 1 + .../internal/orchestrator_service_pb2.py | 352 +++++++++--------- .../internal/orchestrator_service_pb2.pyi | 20 +- submodules/durabletask-protobuf | 1 - 7 files changed, 196 insertions(+), 192 deletions(-) create mode 100644 
durabletask/internal/PROTO_SOURCE_COMMIT_HASH delete mode 160000 submodules/durabletask-protobuf diff --git a/CHANGELOG.md b/CHANGELOG.md index 286312c..ee736f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries ([#36](https://github.com/microsoft/durabletask-python/pull/36)) - by [@berndverst](https://github.com/berndverst) - Http and grpc protocols and their secure variants are stripped from the host name parameter if provided. Secure mode is enabled if the protocol provided is https or grpcs ([#38](https://github.com/microsoft/durabletask-python/pull/38) - by [@berndverst)(https://github.com/berndverst) +- Improve ProtoGen by downloading proto file directly instead of using submodule ([#39](https://github.com/microsoft/durabletask-python/pull/39) - by [@berndverst](https://github.com/berndverst) ### Updates diff --git a/Makefile b/Makefile index 68a9b89..5a05f33 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,8 @@ install: python3 -m pip install . gen-proto: - cp ./submodules/durabletask-protobuf/protos/orchestrator_service.proto durabletask/internal/orchestrator_service.proto + curl -o durabletask/internal/orchestrator_service.proto https://raw.githubusercontent.com/microsoft/durabletask-protobuf/refs/heads/main/protos/orchestrator_service.proto + curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/microsoft/durabletask-protobuf/commits?path=protos/orchestrator_service.proto&sha=main&per_page=1" | jq -r '.[0].sha' >> durabletask/internal/PROTO_SOURCE_COMMIT_HASH python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. 
./durabletask/internal/orchestrator_service.proto rm durabletask/internal/*.proto diff --git a/README.md b/README.md index 94be7ec..b11fc29 100644 --- a/README.md +++ b/README.md @@ -161,19 +161,13 @@ The following is more information about how to develop this project. Note that d ### Generating protobufs -Protobuf definitions are stored in the [./submodules/durabletask-proto](./submodules/durabletask-proto) directory, which is a submodule. To update the submodule, run the following command from the project root: - -```sh -git submodule update --init -``` - -Once the submodule is available, the corresponding source code can be regenerated using the following command from the project root: - ```sh pip3 install -r dev-requirements.txt make gen-proto ``` +This will download the `orchestrator_service.proto` from the `microsoft/durabletask-protobuf` repo and compile it using `grpcio-tools`. The version of the source proto file that was downloaded can be found in the file `durabletask/internal/PROTO_SOURCE_COMMIT_HASH`. + ### Running unit tests Unit tests can be run using the following command from the project root. Unit tests _don't_ require a sidecar process to be running. 
diff --git a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH new file mode 100644 index 0000000..ddbd31a --- /dev/null +++ b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH @@ -0,0 +1 @@ +443b333f4f65a438dc9eb4f090560d232afec4b7 diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py index 9c92eac..44b4a32 100644 --- a/durabletask/internal/orchestrator_service_pb2.py +++ b/durabletask/internal/orchestrator_service_pb2.py @@ -18,7 +18,7 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 
\x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 
\x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 
\x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 
\x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x
1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 
\x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 
\x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 
\x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x9d\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 
\x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 
\x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"j\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 
\n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43lean
EntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -30,184 +30,184 @@ _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._options = None _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=12097 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=12406 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12408 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12473 + _globals['_ORCHESTRATIONSTATUS']._serialized_start=12232 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=12541 + _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12543 + _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12608 _globals['_ORCHESTRATIONINSTANCE']._serialized_start=177 _globals['_ORCHESTRATIONINSTANCE']._serialized_end=271 _globals['_ACTIVITYREQUEST']._serialized_start=274 _globals['_ACTIVITYREQUEST']._serialized_end=511 _globals['_ACTIVITYRESPONSE']._serialized_start=514 - _globals['_ACTIVITYRESPONSE']._serialized_end=659 - _globals['_TASKFAILUREDETAILS']._serialized_start=662 - _globals['_TASKFAILUREDETAILS']._serialized_end=840 - _globals['_PARENTINSTANCEINFO']._serialized_start=843 - _globals['_PARENTINSTANCEINFO']._serialized_end=1034 - _globals['_TRACECONTEXT']._serialized_start=1036 - _globals['_TRACECONTEXT']._serialized_end=1141 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1144 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1536 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1539 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1706 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1708 - 
_globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1796 - _globals['_TASKSCHEDULEDEVENT']._serialized_start=1799 - _globals['_TASKSCHEDULEDEVENT']._serialized_end=1968 - _globals['_TASKCOMPLETEDEVENT']._serialized_start=1970 - _globals['_TASKCOMPLETEDEVENT']._serialized_end=2061 - _globals['_TASKFAILEDEVENT']._serialized_start=2063 - _globals['_TASKFAILEDEVENT']._serialized_end=2150 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2153 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2360 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2362 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2473 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2475 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2582 - _globals['_TIMERCREATEDEVENT']._serialized_start=2584 - _globals['_TIMERCREATEDEVENT']._serialized_end=2647 - _globals['_TIMERFIREDEVENT']._serialized_start=2649 - _globals['_TIMERFIREDEVENT']._serialized_end=2727 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2729 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2755 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2757 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2785 - _globals['_EVENTSENTEVENT']._serialized_start=2787 - _globals['_EVENTSENTEVENT']._serialized_end=2882 - _globals['_EVENTRAISEDEVENT']._serialized_start=2884 - _globals['_EVENTRAISEDEVENT']._serialized_end=2961 - _globals['_GENERICEVENT']._serialized_start=2963 - _globals['_GENERICEVENT']._serialized_end=3021 - _globals['_HISTORYSTATEEVENT']._serialized_start=3023 - _globals['_HISTORYSTATEEVENT']._serialized_end=3091 - _globals['_CONTINUEASNEWEVENT']._serialized_start=3093 - _globals['_CONTINUEASNEWEVENT']._serialized_end=3158 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3160 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3230 - 
_globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3232 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3300 - _globals['_HISTORYEVENT']._serialized_start=3303 - _globals['_HISTORYEVENT']._serialized_end=4461 - _globals['_SCHEDULETASKACTION']._serialized_start=4463 - _globals['_SCHEDULETASKACTION']._serialized_end=4589 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4592 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4748 - _globals['_CREATETIMERACTION']._serialized_start=4750 - _globals['_CREATETIMERACTION']._serialized_end=4813 - _globals['_SENDEVENTACTION']._serialized_start=4815 - _globals['_SENDEVENTACTION']._serialized_end=4932 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4935 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5243 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5245 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5358 - _globals['_ORCHESTRATORACTION']._serialized_start=5361 - _globals['_ORCHESTRATORACTION']._serialized_end=5739 - _globals['_ORCHESTRATORREQUEST']._serialized_start=5742 - _globals['_ORCHESTRATORREQUEST']._serialized_end=5960 - _globals['_ORCHESTRATORRESPONSE']._serialized_start=5963 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=6095 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=6098 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=6517 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6474 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6517 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6519 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6638 - _globals['_CREATEINSTANCERESPONSE']._serialized_start=6640 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=6684 - _globals['_GETINSTANCEREQUEST']._serialized_start=6686 - _globals['_GETINSTANCEREQUEST']._serialized_end=6755 - _globals['_GETINSTANCERESPONSE']._serialized_start=6757 - 
_globals['_GETINSTANCERESPONSE']._serialized_end=6843 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=6845 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=6934 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=6936 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=6960 - _globals['_ORCHESTRATIONSTATE']._serialized_start=6963 - _globals['_ORCHESTRATIONSTATE']._serialized_end=7639 - _globals['_RAISEEVENTREQUEST']._serialized_start=7641 - _globals['_RAISEEVENTREQUEST']._serialized_end=7739 - _globals['_RAISEEVENTRESPONSE']._serialized_start=7741 - _globals['_RAISEEVENTRESPONSE']._serialized_end=7761 - _globals['_TERMINATEREQUEST']._serialized_start=7763 - _globals['_TERMINATEREQUEST']._serialized_end=7866 - _globals['_TERMINATERESPONSE']._serialized_start=7868 - _globals['_TERMINATERESPONSE']._serialized_end=7887 - _globals['_SUSPENDREQUEST']._serialized_start=7889 - _globals['_SUSPENDREQUEST']._serialized_end=7971 - _globals['_SUSPENDRESPONSE']._serialized_start=7973 - _globals['_SUSPENDRESPONSE']._serialized_end=7990 - _globals['_RESUMEREQUEST']._serialized_start=7992 - _globals['_RESUMEREQUEST']._serialized_end=8073 - _globals['_RESUMERESPONSE']._serialized_start=8075 - _globals['_RESUMERESPONSE']._serialized_end=8091 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=8093 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=8147 - _globals['_INSTANCEQUERY']._serialized_start=8150 - _globals['_INSTANCEQUERY']._serialized_end=8536 - _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8539 - _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8669 - _globals['_PURGEINSTANCESREQUEST']._serialized_start=8672 - _globals['_PURGEINSTANCESREQUEST']._serialized_end=8800 - _globals['_PURGEINSTANCEFILTER']._serialized_start=8803 - _globals['_PURGEINSTANCEFILTER']._serialized_end=8973 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8975 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9029 - 
_globals['_CREATETASKHUBREQUEST']._serialized_start=9031 - _globals['_CREATETASKHUBREQUEST']._serialized_end=9079 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=9081 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=9104 - _globals['_DELETETASKHUBREQUEST']._serialized_start=9106 - _globals['_DELETETASKHUBREQUEST']._serialized_end=9128 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=9130 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=9153 - _globals['_SIGNALENTITYREQUEST']._serialized_start=9156 - _globals['_SIGNALENTITYREQUEST']._serialized_end=9326 - _globals['_SIGNALENTITYRESPONSE']._serialized_start=9328 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=9350 - _globals['_GETENTITYREQUEST']._serialized_start=9352 - _globals['_GETENTITYREQUEST']._serialized_end=9412 - _globals['_GETENTITYRESPONSE']._serialized_start=9414 - _globals['_GETENTITYRESPONSE']._serialized_end=9482 - _globals['_ENTITYQUERY']._serialized_start=9485 - _globals['_ENTITYQUERY']._serialized_end=9816 - _globals['_QUERYENTITIESREQUEST']._serialized_start=9818 - _globals['_QUERYENTITIESREQUEST']._serialized_end=9869 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=9871 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=9986 - _globals['_ENTITYMETADATA']._serialized_start=9989 - _globals['_ENTITYMETADATA']._serialized_end=10208 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10211 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10354 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10357 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10503 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10505 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10598 - _globals['_ENTITYBATCHREQUEST']._serialized_start=10601 - _globals['_ENTITYBATCHREQUEST']._serialized_end=10731 - _globals['_ENTITYBATCHRESULT']._serialized_start=10734 - _globals['_ENTITYBATCHRESULT']._serialized_end=10919 - 
_globals['_OPERATIONREQUEST']._serialized_start=10921 - _globals['_OPERATIONREQUEST']._serialized_end=11022 - _globals['_OPERATIONRESULT']._serialized_start=11024 - _globals['_OPERATIONRESULT']._serialized_end=11143 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11145 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11215 - _globals['_OPERATIONRESULTFAILURE']._serialized_start=11217 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=11286 - _globals['_OPERATIONACTION']._serialized_start=11289 - _globals['_OPERATIONACTION']._serialized_end=11445 - _globals['_SENDSIGNALACTION']._serialized_start=11448 - _globals['_SENDSIGNALACTION']._serialized_end=11596 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11599 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11805 - _globals['_GETWORKITEMSREQUEST']._serialized_start=11807 - _globals['_GETWORKITEMSREQUEST']._serialized_end=11828 - _globals['_WORKITEM']._serialized_start=11831 - _globals['_WORKITEM']._serialized_end=12056 - _globals['_COMPLETETASKRESPONSE']._serialized_start=12058 - _globals['_COMPLETETASKRESPONSE']._serialized_end=12080 - _globals['_HEALTHPING']._serialized_start=12082 - _globals['_HEALTHPING']._serialized_end=12094 - _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12476 - _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13880 + _globals['_ACTIVITYRESPONSE']._serialized_end=684 + _globals['_TASKFAILUREDETAILS']._serialized_start=687 + _globals['_TASKFAILUREDETAILS']._serialized_end=865 + _globals['_PARENTINSTANCEINFO']._serialized_start=868 + _globals['_PARENTINSTANCEINFO']._serialized_end=1059 + _globals['_TRACECONTEXT']._serialized_start=1061 + _globals['_TRACECONTEXT']._serialized_end=1166 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1169 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1561 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1564 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1731 + 
_globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1733 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1821 + _globals['_TASKSCHEDULEDEVENT']._serialized_start=1824 + _globals['_TASKSCHEDULEDEVENT']._serialized_end=1993 + _globals['_TASKCOMPLETEDEVENT']._serialized_start=1995 + _globals['_TASKCOMPLETEDEVENT']._serialized_end=2086 + _globals['_TASKFAILEDEVENT']._serialized_start=2088 + _globals['_TASKFAILEDEVENT']._serialized_end=2175 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2178 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2385 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2387 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2498 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2500 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2607 + _globals['_TIMERCREATEDEVENT']._serialized_start=2609 + _globals['_TIMERCREATEDEVENT']._serialized_end=2672 + _globals['_TIMERFIREDEVENT']._serialized_start=2674 + _globals['_TIMERFIREDEVENT']._serialized_end=2752 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2754 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2780 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2782 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2810 + _globals['_EVENTSENTEVENT']._serialized_start=2812 + _globals['_EVENTSENTEVENT']._serialized_end=2907 + _globals['_EVENTRAISEDEVENT']._serialized_start=2909 + _globals['_EVENTRAISEDEVENT']._serialized_end=2986 + _globals['_GENERICEVENT']._serialized_start=2988 + _globals['_GENERICEVENT']._serialized_end=3046 + _globals['_HISTORYSTATEEVENT']._serialized_start=3048 + _globals['_HISTORYSTATEEVENT']._serialized_end=3116 + _globals['_CONTINUEASNEWEVENT']._serialized_start=3118 + _globals['_CONTINUEASNEWEVENT']._serialized_end=3183 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3185 + 
_globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3255 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3257 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3325 + _globals['_HISTORYEVENT']._serialized_start=3328 + _globals['_HISTORYEVENT']._serialized_end=4486 + _globals['_SCHEDULETASKACTION']._serialized_start=4488 + _globals['_SCHEDULETASKACTION']._serialized_end=4614 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4617 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4773 + _globals['_CREATETIMERACTION']._serialized_start=4775 + _globals['_CREATETIMERACTION']._serialized_end=4838 + _globals['_SENDEVENTACTION']._serialized_start=4840 + _globals['_SENDEVENTACTION']._serialized_end=4957 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4960 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5268 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5270 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5383 + _globals['_ORCHESTRATORACTION']._serialized_start=5386 + _globals['_ORCHESTRATORACTION']._serialized_end=5764 + _globals['_ORCHESTRATORREQUEST']._serialized_start=5767 + _globals['_ORCHESTRATORREQUEST']._serialized_end=5985 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=5988 + _globals['_ORCHESTRATORRESPONSE']._serialized_end=6145 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=6148 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=6567 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6524 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6567 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6569 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6688 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=6690 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=6734 + _globals['_GETINSTANCEREQUEST']._serialized_start=6736 + _globals['_GETINSTANCEREQUEST']._serialized_end=6805 + 
_globals['_GETINSTANCERESPONSE']._serialized_start=6807 + _globals['_GETINSTANCERESPONSE']._serialized_end=6893 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=6895 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=6984 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=6986 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=7010 + _globals['_ORCHESTRATIONSTATE']._serialized_start=7013 + _globals['_ORCHESTRATIONSTATE']._serialized_end=7689 + _globals['_RAISEEVENTREQUEST']._serialized_start=7691 + _globals['_RAISEEVENTREQUEST']._serialized_end=7789 + _globals['_RAISEEVENTRESPONSE']._serialized_start=7791 + _globals['_RAISEEVENTRESPONSE']._serialized_end=7811 + _globals['_TERMINATEREQUEST']._serialized_start=7813 + _globals['_TERMINATEREQUEST']._serialized_end=7916 + _globals['_TERMINATERESPONSE']._serialized_start=7918 + _globals['_TERMINATERESPONSE']._serialized_end=7937 + _globals['_SUSPENDREQUEST']._serialized_start=7939 + _globals['_SUSPENDREQUEST']._serialized_end=8021 + _globals['_SUSPENDRESPONSE']._serialized_start=8023 + _globals['_SUSPENDRESPONSE']._serialized_end=8040 + _globals['_RESUMEREQUEST']._serialized_start=8042 + _globals['_RESUMEREQUEST']._serialized_end=8123 + _globals['_RESUMERESPONSE']._serialized_start=8125 + _globals['_RESUMERESPONSE']._serialized_end=8141 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=8143 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=8197 + _globals['_INSTANCEQUERY']._serialized_start=8200 + _globals['_INSTANCEQUERY']._serialized_end=8586 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8589 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8719 + _globals['_PURGEINSTANCESREQUEST']._serialized_start=8722 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=8850 + _globals['_PURGEINSTANCEFILTER']._serialized_start=8853 + _globals['_PURGEINSTANCEFILTER']._serialized_end=9023 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=9025 + 
_globals['_PURGEINSTANCESRESPONSE']._serialized_end=9079 + _globals['_CREATETASKHUBREQUEST']._serialized_start=9081 + _globals['_CREATETASKHUBREQUEST']._serialized_end=9129 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=9131 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=9154 + _globals['_DELETETASKHUBREQUEST']._serialized_start=9156 + _globals['_DELETETASKHUBREQUEST']._serialized_end=9178 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=9180 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=9203 + _globals['_SIGNALENTITYREQUEST']._serialized_start=9206 + _globals['_SIGNALENTITYREQUEST']._serialized_end=9376 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=9378 + _globals['_SIGNALENTITYRESPONSE']._serialized_end=9400 + _globals['_GETENTITYREQUEST']._serialized_start=9402 + _globals['_GETENTITYREQUEST']._serialized_end=9462 + _globals['_GETENTITYRESPONSE']._serialized_start=9464 + _globals['_GETENTITYRESPONSE']._serialized_end=9532 + _globals['_ENTITYQUERY']._serialized_start=9535 + _globals['_ENTITYQUERY']._serialized_end=9866 + _globals['_QUERYENTITIESREQUEST']._serialized_start=9868 + _globals['_QUERYENTITIESREQUEST']._serialized_end=9919 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=9921 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=10036 + _globals['_ENTITYMETADATA']._serialized_start=10039 + _globals['_ENTITYMETADATA']._serialized_end=10258 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10261 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10404 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10407 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10553 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10555 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10648 + _globals['_ENTITYBATCHREQUEST']._serialized_start=10651 + _globals['_ENTITYBATCHREQUEST']._serialized_end=10781 + _globals['_ENTITYBATCHRESULT']._serialized_start=10784 + 
_globals['_ENTITYBATCHRESULT']._serialized_end=10969 + _globals['_OPERATIONREQUEST']._serialized_start=10971 + _globals['_OPERATIONREQUEST']._serialized_end=11072 + _globals['_OPERATIONRESULT']._serialized_start=11074 + _globals['_OPERATIONRESULT']._serialized_end=11193 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11195 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11265 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=11267 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=11336 + _globals['_OPERATIONACTION']._serialized_start=11339 + _globals['_OPERATIONACTION']._serialized_end=11495 + _globals['_SENDSIGNALACTION']._serialized_start=11498 + _globals['_SENDSIGNALACTION']._serialized_end=11646 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11649 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11855 + _globals['_GETWORKITEMSREQUEST']._serialized_start=11857 + _globals['_GETWORKITEMSREQUEST']._serialized_end=11963 + _globals['_WORKITEM']._serialized_start=11966 + _globals['_WORKITEM']._serialized_end=12191 + _globals['_COMPLETETASKRESPONSE']._serialized_start=12193 + _globals['_COMPLETETASKRESPONSE']._serialized_end=12215 + _globals['_HEALTHPING']._serialized_start=12217 + _globals['_HEALTHPING']._serialized_end=12229 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12611 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=14015 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2.pyi b/durabletask/internal/orchestrator_service_pb2.pyi index 82d2e1a..84d2af8 100644 --- a/durabletask/internal/orchestrator_service_pb2.pyi +++ b/durabletask/internal/orchestrator_service_pb2.pyi @@ -63,16 +63,18 @@ class ActivityRequest(_message.Message): def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: 
_Optional[_Union[OrchestrationInstance, _Mapping]] = ..., taskId: _Optional[int] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... class ActivityResponse(_message.Message): - __slots__ = ("instanceId", "taskId", "result", "failureDetails") + __slots__ = ("instanceId", "taskId", "result", "failureDetails", "completionToken") INSTANCEID_FIELD_NUMBER: _ClassVar[int] TASKID_FIELD_NUMBER: _ClassVar[int] RESULT_FIELD_NUMBER: _ClassVar[int] FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] instanceId: str taskId: int result: _wrappers_pb2.StringValue failureDetails: TaskFailureDetails - def __init__(self, instanceId: _Optional[str] = ..., taskId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... + completionToken: str + def __init__(self, instanceId: _Optional[str] = ..., taskId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... class TaskFailureDetails(_message.Message): __slots__ = ("errorType", "errorMessage", "stackTrace", "innerFailure", "isNonRetriable") @@ -421,14 +423,16 @@ class OrchestratorRequest(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ...) -> None: ... 
class OrchestratorResponse(_message.Message): - __slots__ = ("instanceId", "actions", "customStatus") + __slots__ = ("instanceId", "actions", "customStatus", "completionToken") INSTANCEID_FIELD_NUMBER: _ClassVar[int] ACTIONS_FIELD_NUMBER: _ClassVar[int] CUSTOMSTATUS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] instanceId: str actions: _containers.RepeatedCompositeFieldContainer[OrchestratorAction] customStatus: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + completionToken: str + def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... class CreateInstanceRequest(_message.Message): __slots__ = ("instanceId", "name", "version", "input", "scheduledStartTimestamp", "orchestrationIdReusePolicy", "executionId", "tags") @@ -856,8 +860,12 @@ class StartNewOrchestrationAction(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... class GetWorkItemsRequest(_message.Message): - __slots__ = () - def __init__(self) -> None: ... 
+ __slots__ = ("maxConcurrentOrchestrationWorkItems", "maxConcurrentActivityWorkItems") + MAXCONCURRENTORCHESTRATIONWORKITEMS_FIELD_NUMBER: _ClassVar[int] + MAXCONCURRENTACTIVITYWORKITEMS_FIELD_NUMBER: _ClassVar[int] + maxConcurrentOrchestrationWorkItems: int + maxConcurrentActivityWorkItems: int + def __init__(self, maxConcurrentOrchestrationWorkItems: _Optional[int] = ..., maxConcurrentActivityWorkItems: _Optional[int] = ...) -> None: ... class WorkItem(_message.Message): __slots__ = ("orchestratorRequest", "activityRequest", "entityRequest", "healthPing", "completionToken") diff --git a/submodules/durabletask-protobuf b/submodules/durabletask-protobuf deleted file mode 160000 index c7d8cd8..0000000 --- a/submodules/durabletask-protobuf +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c7d8cd898017342d090ba9531c3f2ec45b8e07e7 From d46416045e68976bd4ed60721826b35cb2fababc Mon Sep 17 00:00:00 2001 From: wangbill Date: Thu, 23 Jan 2025 11:57:56 -0800 Subject: [PATCH 07/81] remove gitmodule file (#41) Signed-off-by: Elena Kolevska --- .gitmodules | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 .gitmodules diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index b371516..0000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "submodules/durabletask-protobuf"] - path = submodules/durabletask-protobuf - url = https://github.com/microsoft/durabletask-protobuf From e5057f3e1ce28b98d02d55e0444f229232104a34 Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Fri, 7 Feb 2025 22:00:30 +0000 Subject: [PATCH 08/81] Updated protos to latest Signed-off-by: Elena Kolevska --- durabletask/internal/PROTO_SOURCE_COMMIT_HASH | 2 +- .../internal/orchestrator_service_pb2.py | 280 ++++++++++-------- .../internal/orchestrator_service_pb2.pyi | 155 +++++++++- .../internal/orchestrator_service_pb2_grpc.py | 34 +++ 4 files changed, 331 insertions(+), 140 deletions(-) diff --git a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH 
b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH index ddbd31a..90bb04b 100644 --- a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH +++ b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH @@ -1 +1 @@ -443b333f4f65a438dc9eb4f090560d232afec4b7 +c672a0dc97c06587d7399ee12f1c5b0b9fc492a7c672a0dc97c06587d7399ee12f1c5b0b9fc492a7 diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py index 44b4a32..5efef70 100644 --- a/durabletask/internal/orchestrator_service_pb2.py +++ b/durabletask/internal/orchestrator_service_pb2.py @@ -18,7 +18,7 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 
\x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 
\x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x9d\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 
\x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 
\x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 
\x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 
\x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"j\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeReq
uest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 
\x01(\x0b\x32\r.TraceContext\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 
\x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdc\x01\n\x1c\x45ntityOperationSignaledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xcb\x02\n\x1a\x45ntityOperationCalledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10parentInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x11parentExecutionId\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x90\x01\n\x18\x45ntityLockRequestedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x0f\n\x07lockSet\x18\x02 \x03(\t\x12\x10\n\x08position\x18\x03 \x01(\x05\x12\x36\n\x10parentInstanceId\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"`\n\x1d\x45ntityOperationCompletedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\\\n\x1a\x45ntityOperationFailedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12+\n\x0e\x66\x61ilureDetails\x18\x02 
\x01(\x0b\x32\x13.TaskFailureDetails\"\xa2\x01\n\x15\x45ntityUnlockSentEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x36\n\x10parentInstanceId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x16\x45ntityLockGrantedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\"\xac\x0c\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 
\x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x12@\n\x17\x65ntityOperationSignaled\x18\x17 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x18 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x42\n\x18\x65ntityOperationCompleted\x18\x19 \x01(\x0b\x32\x1e.EntityOperationCompletedEventH\x00\x12<\n\x15\x65ntityOperationFailed\x18\x1a \x01(\x0b\x32\x1b.EntityOperationFailedEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x1b \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x34\n\x11\x65ntityLockGranted\x18\x1c \x01(\x0b\x32\x17.EntityLockGrantedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x1d \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xfc\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\x12 \n\x18requiresHistoryStreaming\x18\x06 \x01(\x08\"\xd6\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\x12\x37\n\x12numEventsProcessed\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b 
\x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 
\x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 
\x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\x95\x01\n\rEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x13\n\x0b\x65xecutionId\x18\x02 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12(\n\x11operationRequests\x18\x04 \x03(\x0b\x32\r.HistoryEvent\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 
\x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb9\x01\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\x12$\n\x1cmaxConcurrentEntityWorkItems\x18\x03 \x01(\x05\x12\'\n\x0c\x63\x61pabilities\x18\n \x03(\x0e\x32\x11.WorkerCapability\"\x8c\x02\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12)\n\x0f\x65ntityRequestV2\x18\x05 
\x01(\x0b\x32\x0e.EntityRequestH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing\"\x84\x01\n\x1cStreamInstanceHistoryRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66orWorkItemProcessing\x18\x03 \x01(\x08\"-\n\x0cHistoryChunk\x12\x1d\n\x06\x65vents\x18\x01 \x03(\x0b\x32\r.HistoryEvent*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02*^\n\x10WorkerCapability\x12!\n\x1dWORKER_CAPABILITY_UNSPECIFIED\x10\x00\x12\'\n#WORKER_CAPABILITY_HISTORY_STREAMING\x10\x01\x32\xc5\x0b\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryIn
stances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12G\n\x15StreamInstanceHistory\x12\x1d.StreamInstanceHistoryRequest\x1a\r.HistoryChunk0\x01\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -30,10 +30,12 @@ _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._options = None _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=12232 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=12541 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12543 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12608 + _globals['_ORCHESTRATIONSTATUS']._serialized_start=14316 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=14625 + _globals['_CREATEORCHESTRATIONACTION']._serialized_start=14627 + 
_globals['_CREATEORCHESTRATIONACTION']._serialized_end=14692 + _globals['_WORKERCAPABILITY']._serialized_start=14694 + _globals['_WORKERCAPABILITY']._serialized_end=14788 _globals['_ORCHESTRATIONINSTANCE']._serialized_start=177 _globals['_ORCHESTRATIONINSTANCE']._serialized_end=271 _globals['_ACTIVITYREQUEST']._serialized_start=274 @@ -86,128 +88,148 @@ _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3255 _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3257 _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3325 - _globals['_HISTORYEVENT']._serialized_start=3328 - _globals['_HISTORYEVENT']._serialized_end=4486 - _globals['_SCHEDULETASKACTION']._serialized_start=4488 - _globals['_SCHEDULETASKACTION']._serialized_end=4614 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4617 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4773 - _globals['_CREATETIMERACTION']._serialized_start=4775 - _globals['_CREATETIMERACTION']._serialized_end=4838 - _globals['_SENDEVENTACTION']._serialized_start=4840 - _globals['_SENDEVENTACTION']._serialized_end=4957 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4960 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5268 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5270 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5383 - _globals['_ORCHESTRATORACTION']._serialized_start=5386 - _globals['_ORCHESTRATORACTION']._serialized_end=5764 - _globals['_ORCHESTRATORREQUEST']._serialized_start=5767 - _globals['_ORCHESTRATORREQUEST']._serialized_end=5985 - _globals['_ORCHESTRATORRESPONSE']._serialized_start=5988 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=6145 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=6148 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=6567 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6524 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6567 - 
_globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6569 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6688 - _globals['_CREATEINSTANCERESPONSE']._serialized_start=6690 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=6734 - _globals['_GETINSTANCEREQUEST']._serialized_start=6736 - _globals['_GETINSTANCEREQUEST']._serialized_end=6805 - _globals['_GETINSTANCERESPONSE']._serialized_start=6807 - _globals['_GETINSTANCERESPONSE']._serialized_end=6893 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=6895 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=6984 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=6986 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=7010 - _globals['_ORCHESTRATIONSTATE']._serialized_start=7013 - _globals['_ORCHESTRATIONSTATE']._serialized_end=7689 - _globals['_RAISEEVENTREQUEST']._serialized_start=7691 - _globals['_RAISEEVENTREQUEST']._serialized_end=7789 - _globals['_RAISEEVENTRESPONSE']._serialized_start=7791 - _globals['_RAISEEVENTRESPONSE']._serialized_end=7811 - _globals['_TERMINATEREQUEST']._serialized_start=7813 - _globals['_TERMINATEREQUEST']._serialized_end=7916 - _globals['_TERMINATERESPONSE']._serialized_start=7918 - _globals['_TERMINATERESPONSE']._serialized_end=7937 - _globals['_SUSPENDREQUEST']._serialized_start=7939 - _globals['_SUSPENDREQUEST']._serialized_end=8021 - _globals['_SUSPENDRESPONSE']._serialized_start=8023 - _globals['_SUSPENDRESPONSE']._serialized_end=8040 - _globals['_RESUMEREQUEST']._serialized_start=8042 - _globals['_RESUMEREQUEST']._serialized_end=8123 - _globals['_RESUMERESPONSE']._serialized_start=8125 - _globals['_RESUMERESPONSE']._serialized_end=8141 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=8143 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=8197 - _globals['_INSTANCEQUERY']._serialized_start=8200 - _globals['_INSTANCEQUERY']._serialized_end=8586 - _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8589 - 
_globals['_QUERYINSTANCESRESPONSE']._serialized_end=8719 - _globals['_PURGEINSTANCESREQUEST']._serialized_start=8722 - _globals['_PURGEINSTANCESREQUEST']._serialized_end=8850 - _globals['_PURGEINSTANCEFILTER']._serialized_start=8853 - _globals['_PURGEINSTANCEFILTER']._serialized_end=9023 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=9025 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9079 - _globals['_CREATETASKHUBREQUEST']._serialized_start=9081 - _globals['_CREATETASKHUBREQUEST']._serialized_end=9129 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=9131 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=9154 - _globals['_DELETETASKHUBREQUEST']._serialized_start=9156 - _globals['_DELETETASKHUBREQUEST']._serialized_end=9178 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=9180 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=9203 - _globals['_SIGNALENTITYREQUEST']._serialized_start=9206 - _globals['_SIGNALENTITYREQUEST']._serialized_end=9376 - _globals['_SIGNALENTITYRESPONSE']._serialized_start=9378 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=9400 - _globals['_GETENTITYREQUEST']._serialized_start=9402 - _globals['_GETENTITYREQUEST']._serialized_end=9462 - _globals['_GETENTITYRESPONSE']._serialized_start=9464 - _globals['_GETENTITYRESPONSE']._serialized_end=9532 - _globals['_ENTITYQUERY']._serialized_start=9535 - _globals['_ENTITYQUERY']._serialized_end=9866 - _globals['_QUERYENTITIESREQUEST']._serialized_start=9868 - _globals['_QUERYENTITIESREQUEST']._serialized_end=9919 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=9921 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=10036 - _globals['_ENTITYMETADATA']._serialized_start=10039 - _globals['_ENTITYMETADATA']._serialized_end=10258 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10261 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10404 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10407 - 
_globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10553 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10555 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10648 - _globals['_ENTITYBATCHREQUEST']._serialized_start=10651 - _globals['_ENTITYBATCHREQUEST']._serialized_end=10781 - _globals['_ENTITYBATCHRESULT']._serialized_start=10784 - _globals['_ENTITYBATCHRESULT']._serialized_end=10969 - _globals['_OPERATIONREQUEST']._serialized_start=10971 - _globals['_OPERATIONREQUEST']._serialized_end=11072 - _globals['_OPERATIONRESULT']._serialized_start=11074 - _globals['_OPERATIONRESULT']._serialized_end=11193 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11195 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11265 - _globals['_OPERATIONRESULTFAILURE']._serialized_start=11267 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=11336 - _globals['_OPERATIONACTION']._serialized_start=11339 - _globals['_OPERATIONACTION']._serialized_end=11495 - _globals['_SENDSIGNALACTION']._serialized_start=11498 - _globals['_SENDSIGNALACTION']._serialized_end=11646 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11649 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11855 - _globals['_GETWORKITEMSREQUEST']._serialized_start=11857 - _globals['_GETWORKITEMSREQUEST']._serialized_end=11963 - _globals['_WORKITEM']._serialized_start=11966 - _globals['_WORKITEM']._serialized_end=12191 - _globals['_COMPLETETASKRESPONSE']._serialized_start=12193 - _globals['_COMPLETETASKRESPONSE']._serialized_end=12215 - _globals['_HEALTHPING']._serialized_start=12217 - _globals['_HEALTHPING']._serialized_end=12229 - _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12611 - _globals['_TASKHUBSIDECARSERVICE']._serialized_end=14015 + _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_start=3328 + _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_end=3548 + _globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_start=3551 + 
_globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_end=3882 + _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_start=3885 + _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_end=4029 + _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_start=4031 + _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_end=4127 + _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_start=4129 + _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_end=4221 + _globals['_ENTITYUNLOCKSENTEVENT']._serialized_start=4224 + _globals['_ENTITYUNLOCKSENTEVENT']._serialized_end=4386 + _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_start=4388 + _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_end=4439 + _globals['_HISTORYEVENT']._serialized_start=4442 + _globals['_HISTORYEVENT']._serialized_end=6022 + _globals['_SCHEDULETASKACTION']._serialized_start=6024 + _globals['_SCHEDULETASKACTION']._serialized_end=6150 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=6153 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=6309 + _globals['_CREATETIMERACTION']._serialized_start=6311 + _globals['_CREATETIMERACTION']._serialized_end=6374 + _globals['_SENDEVENTACTION']._serialized_start=6376 + _globals['_SENDEVENTACTION']._serialized_end=6493 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=6496 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=6804 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=6806 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=6919 + _globals['_ORCHESTRATORACTION']._serialized_start=6922 + _globals['_ORCHESTRATORACTION']._serialized_end=7300 + _globals['_ORCHESTRATORREQUEST']._serialized_start=7303 + _globals['_ORCHESTRATORREQUEST']._serialized_end=7555 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=7558 + _globals['_ORCHESTRATORRESPONSE']._serialized_end=7772 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=7775 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=8194 + 
_globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=8151 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=8194 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=8196 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=8315 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=8317 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=8361 + _globals['_GETINSTANCEREQUEST']._serialized_start=8363 + _globals['_GETINSTANCEREQUEST']._serialized_end=8432 + _globals['_GETINSTANCERESPONSE']._serialized_start=8434 + _globals['_GETINSTANCERESPONSE']._serialized_end=8520 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=8522 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=8611 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=8613 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=8637 + _globals['_ORCHESTRATIONSTATE']._serialized_start=8640 + _globals['_ORCHESTRATIONSTATE']._serialized_end=9316 + _globals['_RAISEEVENTREQUEST']._serialized_start=9318 + _globals['_RAISEEVENTREQUEST']._serialized_end=9416 + _globals['_RAISEEVENTRESPONSE']._serialized_start=9418 + _globals['_RAISEEVENTRESPONSE']._serialized_end=9438 + _globals['_TERMINATEREQUEST']._serialized_start=9440 + _globals['_TERMINATEREQUEST']._serialized_end=9543 + _globals['_TERMINATERESPONSE']._serialized_start=9545 + _globals['_TERMINATERESPONSE']._serialized_end=9564 + _globals['_SUSPENDREQUEST']._serialized_start=9566 + _globals['_SUSPENDREQUEST']._serialized_end=9648 + _globals['_SUSPENDRESPONSE']._serialized_start=9650 + _globals['_SUSPENDRESPONSE']._serialized_end=9667 + _globals['_RESUMEREQUEST']._serialized_start=9669 + _globals['_RESUMEREQUEST']._serialized_end=9750 + _globals['_RESUMERESPONSE']._serialized_start=9752 + _globals['_RESUMERESPONSE']._serialized_end=9768 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=9770 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=9824 + _globals['_INSTANCEQUERY']._serialized_start=9827 + 
_globals['_INSTANCEQUERY']._serialized_end=10213 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=10216 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=10346 + _globals['_PURGEINSTANCESREQUEST']._serialized_start=10349 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=10477 + _globals['_PURGEINSTANCEFILTER']._serialized_start=10480 + _globals['_PURGEINSTANCEFILTER']._serialized_end=10650 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=10652 + _globals['_PURGEINSTANCESRESPONSE']._serialized_end=10706 + _globals['_CREATETASKHUBREQUEST']._serialized_start=10708 + _globals['_CREATETASKHUBREQUEST']._serialized_end=10756 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=10758 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=10781 + _globals['_DELETETASKHUBREQUEST']._serialized_start=10783 + _globals['_DELETETASKHUBREQUEST']._serialized_end=10805 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=10807 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=10830 + _globals['_SIGNALENTITYREQUEST']._serialized_start=10833 + _globals['_SIGNALENTITYREQUEST']._serialized_end=11003 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=11005 + _globals['_SIGNALENTITYRESPONSE']._serialized_end=11027 + _globals['_GETENTITYREQUEST']._serialized_start=11029 + _globals['_GETENTITYREQUEST']._serialized_end=11089 + _globals['_GETENTITYRESPONSE']._serialized_start=11091 + _globals['_GETENTITYRESPONSE']._serialized_end=11159 + _globals['_ENTITYQUERY']._serialized_start=11162 + _globals['_ENTITYQUERY']._serialized_end=11493 + _globals['_QUERYENTITIESREQUEST']._serialized_start=11495 + _globals['_QUERYENTITIESREQUEST']._serialized_end=11546 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=11548 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=11663 + _globals['_ENTITYMETADATA']._serialized_start=11666 + _globals['_ENTITYMETADATA']._serialized_end=11885 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=11888 + 
_globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=12031 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=12034 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=12180 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=12182 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=12275 + _globals['_ENTITYBATCHREQUEST']._serialized_start=12278 + _globals['_ENTITYBATCHREQUEST']._serialized_end=12408 + _globals['_ENTITYBATCHRESULT']._serialized_start=12411 + _globals['_ENTITYBATCHRESULT']._serialized_end=12596 + _globals['_ENTITYREQUEST']._serialized_start=12599 + _globals['_ENTITYREQUEST']._serialized_end=12748 + _globals['_OPERATIONREQUEST']._serialized_start=12750 + _globals['_OPERATIONREQUEST']._serialized_end=12851 + _globals['_OPERATIONRESULT']._serialized_start=12853 + _globals['_OPERATIONRESULT']._serialized_end=12972 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=12974 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=13044 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=13046 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=13115 + _globals['_OPERATIONACTION']._serialized_start=13118 + _globals['_OPERATIONACTION']._serialized_end=13274 + _globals['_SENDSIGNALACTION']._serialized_start=13277 + _globals['_SENDSIGNALACTION']._serialized_end=13425 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=13428 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=13634 + _globals['_GETWORKITEMSREQUEST']._serialized_start=13637 + _globals['_GETWORKITEMSREQUEST']._serialized_end=13822 + _globals['_WORKITEM']._serialized_start=13825 + _globals['_WORKITEM']._serialized_end=14093 + _globals['_COMPLETETASKRESPONSE']._serialized_start=14095 + _globals['_COMPLETETASKRESPONSE']._serialized_end=14117 + _globals['_HEALTHPING']._serialized_start=14119 + _globals['_HEALTHPING']._serialized_end=14131 + _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_start=14134 + 
_globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_end=14266 + _globals['_HISTORYCHUNK']._serialized_start=14268 + _globals['_HISTORYCHUNK']._serialized_end=14313 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=14791 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=16268 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2.pyi b/durabletask/internal/orchestrator_service_pb2.pyi index 84d2af8..83d3d06 100644 --- a/durabletask/internal/orchestrator_service_pb2.pyi +++ b/durabletask/internal/orchestrator_service_pb2.pyi @@ -26,6 +26,11 @@ class CreateOrchestrationAction(int, metaclass=_enum_type_wrapper.EnumTypeWrappe ERROR: _ClassVar[CreateOrchestrationAction] IGNORE: _ClassVar[CreateOrchestrationAction] TERMINATE: _ClassVar[CreateOrchestrationAction] + +class WorkerCapability(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + WORKER_CAPABILITY_UNSPECIFIED: _ClassVar[WorkerCapability] + WORKER_CAPABILITY_HISTORY_STREAMING: _ClassVar[WorkerCapability] ORCHESTRATION_STATUS_RUNNING: OrchestrationStatus ORCHESTRATION_STATUS_COMPLETED: OrchestrationStatus ORCHESTRATION_STATUS_CONTINUED_AS_NEW: OrchestrationStatus @@ -37,6 +42,8 @@ ORCHESTRATION_STATUS_SUSPENDED: OrchestrationStatus ERROR: CreateOrchestrationAction IGNORE: CreateOrchestrationAction TERMINATE: CreateOrchestrationAction +WORKER_CAPABILITY_UNSPECIFIED: WorkerCapability +WORKER_CAPABILITY_HISTORY_STREAMING: WorkerCapability class OrchestrationInstance(_message.Message): __slots__ = ("instanceId", "executionId") @@ -278,8 +285,84 @@ class ExecutionResumedEvent(_message.Message): input: _wrappers_pb2.StringValue def __init__(self, input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
+class EntityOperationSignaledEvent(_message.Message): + __slots__ = ("requestId", "operation", "scheduledTime", "input", "targetInstanceId") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + OPERATION_FIELD_NUMBER: _ClassVar[int] + SCHEDULEDTIME_FIELD_NUMBER: _ClassVar[int] + INPUT_FIELD_NUMBER: _ClassVar[int] + TARGETINSTANCEID_FIELD_NUMBER: _ClassVar[int] + requestId: str + operation: str + scheduledTime: _timestamp_pb2.Timestamp + input: _wrappers_pb2.StringValue + targetInstanceId: _wrappers_pb2.StringValue + def __init__(self, requestId: _Optional[str] = ..., operation: _Optional[str] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + +class EntityOperationCalledEvent(_message.Message): + __slots__ = ("requestId", "operation", "scheduledTime", "input", "parentInstanceId", "parentExecutionId", "targetInstanceId") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + OPERATION_FIELD_NUMBER: _ClassVar[int] + SCHEDULEDTIME_FIELD_NUMBER: _ClassVar[int] + INPUT_FIELD_NUMBER: _ClassVar[int] + PARENTINSTANCEID_FIELD_NUMBER: _ClassVar[int] + PARENTEXECUTIONID_FIELD_NUMBER: _ClassVar[int] + TARGETINSTANCEID_FIELD_NUMBER: _ClassVar[int] + requestId: str + operation: str + scheduledTime: _timestamp_pb2.Timestamp + input: _wrappers_pb2.StringValue + parentInstanceId: _wrappers_pb2.StringValue + parentExecutionId: _wrappers_pb2.StringValue + targetInstanceId: _wrappers_pb2.StringValue + def __init__(self, requestId: _Optional[str] = ..., operation: _Optional[str] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentExecutionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: 
_Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + +class EntityLockRequestedEvent(_message.Message): + __slots__ = ("criticalSectionId", "lockSet", "position", "parentInstanceId") + CRITICALSECTIONID_FIELD_NUMBER: _ClassVar[int] + LOCKSET_FIELD_NUMBER: _ClassVar[int] + POSITION_FIELD_NUMBER: _ClassVar[int] + PARENTINSTANCEID_FIELD_NUMBER: _ClassVar[int] + criticalSectionId: str + lockSet: _containers.RepeatedScalarFieldContainer[str] + position: int + parentInstanceId: _wrappers_pb2.StringValue + def __init__(self, criticalSectionId: _Optional[str] = ..., lockSet: _Optional[_Iterable[str]] = ..., position: _Optional[int] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + +class EntityOperationCompletedEvent(_message.Message): + __slots__ = ("requestId", "output") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + OUTPUT_FIELD_NUMBER: _ClassVar[int] + requestId: str + output: _wrappers_pb2.StringValue + def __init__(self, requestId: _Optional[str] = ..., output: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + +class EntityOperationFailedEvent(_message.Message): + __slots__ = ("requestId", "failureDetails") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + requestId: str + failureDetails: TaskFailureDetails + def __init__(self, requestId: _Optional[str] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... 
+ +class EntityUnlockSentEvent(_message.Message): + __slots__ = ("criticalSectionId", "parentInstanceId", "targetInstanceId") + CRITICALSECTIONID_FIELD_NUMBER: _ClassVar[int] + PARENTINSTANCEID_FIELD_NUMBER: _ClassVar[int] + TARGETINSTANCEID_FIELD_NUMBER: _ClassVar[int] + criticalSectionId: str + parentInstanceId: _wrappers_pb2.StringValue + targetInstanceId: _wrappers_pb2.StringValue + def __init__(self, criticalSectionId: _Optional[str] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + +class EntityLockGrantedEvent(_message.Message): + __slots__ = ("criticalSectionId",) + CRITICALSECTIONID_FIELD_NUMBER: _ClassVar[int] + criticalSectionId: str + def __init__(self, criticalSectionId: _Optional[str] = ...) -> None: ... + class HistoryEvent(_message.Message): - __slots__ = ("eventId", "timestamp", "executionStarted", "executionCompleted", "executionTerminated", "taskScheduled", "taskCompleted", "taskFailed", "subOrchestrationInstanceCreated", "subOrchestrationInstanceCompleted", "subOrchestrationInstanceFailed", "timerCreated", "timerFired", "orchestratorStarted", "orchestratorCompleted", "eventSent", "eventRaised", "genericEvent", "historyState", "continueAsNew", "executionSuspended", "executionResumed") + __slots__ = ("eventId", "timestamp", "executionStarted", "executionCompleted", "executionTerminated", "taskScheduled", "taskCompleted", "taskFailed", "subOrchestrationInstanceCreated", "subOrchestrationInstanceCompleted", "subOrchestrationInstanceFailed", "timerCreated", "timerFired", "orchestratorStarted", "orchestratorCompleted", "eventSent", "eventRaised", "genericEvent", "historyState", "continueAsNew", "executionSuspended", "executionResumed", "entityOperationSignaled", "entityOperationCalled", "entityOperationCompleted", "entityOperationFailed", "entityLockRequested", "entityLockGranted", "entityUnlockSent") 
EVENTID_FIELD_NUMBER: _ClassVar[int] TIMESTAMP_FIELD_NUMBER: _ClassVar[int] EXECUTIONSTARTED_FIELD_NUMBER: _ClassVar[int] @@ -302,6 +385,13 @@ class HistoryEvent(_message.Message): CONTINUEASNEW_FIELD_NUMBER: _ClassVar[int] EXECUTIONSUSPENDED_FIELD_NUMBER: _ClassVar[int] EXECUTIONRESUMED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONSIGNALED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONCALLED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONCOMPLETED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONFAILED_FIELD_NUMBER: _ClassVar[int] + ENTITYLOCKREQUESTED_FIELD_NUMBER: _ClassVar[int] + ENTITYLOCKGRANTED_FIELD_NUMBER: _ClassVar[int] + ENTITYUNLOCKSENT_FIELD_NUMBER: _ClassVar[int] eventId: int timestamp: _timestamp_pb2.Timestamp executionStarted: ExecutionStartedEvent @@ -324,7 +414,14 @@ class HistoryEvent(_message.Message): continueAsNew: ContinueAsNewEvent executionSuspended: ExecutionSuspendedEvent executionResumed: ExecutionResumedEvent - def __init__(self, eventId: _Optional[int] = ..., timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., executionStarted: _Optional[_Union[ExecutionStartedEvent, _Mapping]] = ..., executionCompleted: _Optional[_Union[ExecutionCompletedEvent, _Mapping]] = ..., executionTerminated: _Optional[_Union[ExecutionTerminatedEvent, _Mapping]] = ..., taskScheduled: _Optional[_Union[TaskScheduledEvent, _Mapping]] = ..., taskCompleted: _Optional[_Union[TaskCompletedEvent, _Mapping]] = ..., taskFailed: _Optional[_Union[TaskFailedEvent, _Mapping]] = ..., subOrchestrationInstanceCreated: _Optional[_Union[SubOrchestrationInstanceCreatedEvent, _Mapping]] = ..., subOrchestrationInstanceCompleted: _Optional[_Union[SubOrchestrationInstanceCompletedEvent, _Mapping]] = ..., subOrchestrationInstanceFailed: _Optional[_Union[SubOrchestrationInstanceFailedEvent, _Mapping]] = ..., timerCreated: _Optional[_Union[TimerCreatedEvent, _Mapping]] = ..., timerFired: _Optional[_Union[TimerFiredEvent, _Mapping]] = ..., orchestratorStarted: 
_Optional[_Union[OrchestratorStartedEvent, _Mapping]] = ..., orchestratorCompleted: _Optional[_Union[OrchestratorCompletedEvent, _Mapping]] = ..., eventSent: _Optional[_Union[EventSentEvent, _Mapping]] = ..., eventRaised: _Optional[_Union[EventRaisedEvent, _Mapping]] = ..., genericEvent: _Optional[_Union[GenericEvent, _Mapping]] = ..., historyState: _Optional[_Union[HistoryStateEvent, _Mapping]] = ..., continueAsNew: _Optional[_Union[ContinueAsNewEvent, _Mapping]] = ..., executionSuspended: _Optional[_Union[ExecutionSuspendedEvent, _Mapping]] = ..., executionResumed: _Optional[_Union[ExecutionResumedEvent, _Mapping]] = ...) -> None: ... + entityOperationSignaled: EntityOperationSignaledEvent + entityOperationCalled: EntityOperationCalledEvent + entityOperationCompleted: EntityOperationCompletedEvent + entityOperationFailed: EntityOperationFailedEvent + entityLockRequested: EntityLockRequestedEvent + entityLockGranted: EntityLockGrantedEvent + entityUnlockSent: EntityUnlockSentEvent + def __init__(self, eventId: _Optional[int] = ..., timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., executionStarted: _Optional[_Union[ExecutionStartedEvent, _Mapping]] = ..., executionCompleted: _Optional[_Union[ExecutionCompletedEvent, _Mapping]] = ..., executionTerminated: _Optional[_Union[ExecutionTerminatedEvent, _Mapping]] = ..., taskScheduled: _Optional[_Union[TaskScheduledEvent, _Mapping]] = ..., taskCompleted: _Optional[_Union[TaskCompletedEvent, _Mapping]] = ..., taskFailed: _Optional[_Union[TaskFailedEvent, _Mapping]] = ..., subOrchestrationInstanceCreated: _Optional[_Union[SubOrchestrationInstanceCreatedEvent, _Mapping]] = ..., subOrchestrationInstanceCompleted: _Optional[_Union[SubOrchestrationInstanceCompletedEvent, _Mapping]] = ..., subOrchestrationInstanceFailed: _Optional[_Union[SubOrchestrationInstanceFailedEvent, _Mapping]] = ..., timerCreated: _Optional[_Union[TimerCreatedEvent, _Mapping]] = ..., timerFired: _Optional[_Union[TimerFiredEvent, 
_Mapping]] = ..., orchestratorStarted: _Optional[_Union[OrchestratorStartedEvent, _Mapping]] = ..., orchestratorCompleted: _Optional[_Union[OrchestratorCompletedEvent, _Mapping]] = ..., eventSent: _Optional[_Union[EventSentEvent, _Mapping]] = ..., eventRaised: _Optional[_Union[EventRaisedEvent, _Mapping]] = ..., genericEvent: _Optional[_Union[GenericEvent, _Mapping]] = ..., historyState: _Optional[_Union[HistoryStateEvent, _Mapping]] = ..., continueAsNew: _Optional[_Union[ContinueAsNewEvent, _Mapping]] = ..., executionSuspended: _Optional[_Union[ExecutionSuspendedEvent, _Mapping]] = ..., executionResumed: _Optional[_Union[ExecutionResumedEvent, _Mapping]] = ..., entityOperationSignaled: _Optional[_Union[EntityOperationSignaledEvent, _Mapping]] = ..., entityOperationCalled: _Optional[_Union[EntityOperationCalledEvent, _Mapping]] = ..., entityOperationCompleted: _Optional[_Union[EntityOperationCompletedEvent, _Mapping]] = ..., entityOperationFailed: _Optional[_Union[EntityOperationFailedEvent, _Mapping]] = ..., entityLockRequested: _Optional[_Union[EntityLockRequestedEvent, _Mapping]] = ..., entityLockGranted: _Optional[_Union[EntityLockGrantedEvent, _Mapping]] = ..., entityUnlockSent: _Optional[_Union[EntityUnlockSentEvent, _Mapping]] = ...) -> None: ... class ScheduleTaskAction(_message.Message): __slots__ = ("name", "version", "input") @@ -409,30 +506,34 @@ class OrchestratorAction(_message.Message): def __init__(self, id: _Optional[int] = ..., scheduleTask: _Optional[_Union[ScheduleTaskAction, _Mapping]] = ..., createSubOrchestration: _Optional[_Union[CreateSubOrchestrationAction, _Mapping]] = ..., createTimer: _Optional[_Union[CreateTimerAction, _Mapping]] = ..., sendEvent: _Optional[_Union[SendEventAction, _Mapping]] = ..., completeOrchestration: _Optional[_Union[CompleteOrchestrationAction, _Mapping]] = ..., terminateOrchestration: _Optional[_Union[TerminateOrchestrationAction, _Mapping]] = ...) -> None: ... 
class OrchestratorRequest(_message.Message): - __slots__ = ("instanceId", "executionId", "pastEvents", "newEvents", "entityParameters") + __slots__ = ("instanceId", "executionId", "pastEvents", "newEvents", "entityParameters", "requiresHistoryStreaming") INSTANCEID_FIELD_NUMBER: _ClassVar[int] EXECUTIONID_FIELD_NUMBER: _ClassVar[int] PASTEVENTS_FIELD_NUMBER: _ClassVar[int] NEWEVENTS_FIELD_NUMBER: _ClassVar[int] ENTITYPARAMETERS_FIELD_NUMBER: _ClassVar[int] + REQUIRESHISTORYSTREAMING_FIELD_NUMBER: _ClassVar[int] instanceId: str executionId: _wrappers_pb2.StringValue pastEvents: _containers.RepeatedCompositeFieldContainer[HistoryEvent] newEvents: _containers.RepeatedCompositeFieldContainer[HistoryEvent] entityParameters: OrchestratorEntityParameters - def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ...) -> None: ... + requiresHistoryStreaming: bool + def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ..., requiresHistoryStreaming: bool = ...) -> None: ... 
class OrchestratorResponse(_message.Message): - __slots__ = ("instanceId", "actions", "customStatus", "completionToken") + __slots__ = ("instanceId", "actions", "customStatus", "completionToken", "numEventsProcessed") INSTANCEID_FIELD_NUMBER: _ClassVar[int] ACTIONS_FIELD_NUMBER: _ClassVar[int] CUSTOMSTATUS_FIELD_NUMBER: _ClassVar[int] COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + NUMEVENTSPROCESSED_FIELD_NUMBER: _ClassVar[int] instanceId: str actions: _containers.RepeatedCompositeFieldContainer[OrchestratorAction] customStatus: _wrappers_pb2.StringValue completionToken: str - def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... + numEventsProcessed: _wrappers_pb2.Int32Value + def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completionToken: _Optional[str] = ..., numEventsProcessed: _Optional[_Union[_wrappers_pb2.Int32Value, _Mapping]] = ...) -> None: ... class CreateInstanceRequest(_message.Message): __slots__ = ("instanceId", "name", "version", "input", "scheduledStartTimestamp", "orchestrationIdReusePolicy", "executionId", "tags") @@ -793,6 +894,18 @@ class EntityBatchResult(_message.Message): failureDetails: TaskFailureDetails def __init__(self, results: _Optional[_Iterable[_Union[OperationResult, _Mapping]]] = ..., actions: _Optional[_Iterable[_Union[OperationAction, _Mapping]]] = ..., entityState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... 
+class EntityRequest(_message.Message): + __slots__ = ("instanceId", "executionId", "entityState", "operationRequests") + INSTANCEID_FIELD_NUMBER: _ClassVar[int] + EXECUTIONID_FIELD_NUMBER: _ClassVar[int] + ENTITYSTATE_FIELD_NUMBER: _ClassVar[int] + OPERATIONREQUESTS_FIELD_NUMBER: _ClassVar[int] + instanceId: str + executionId: str + entityState: _wrappers_pb2.StringValue + operationRequests: _containers.RepeatedCompositeFieldContainer[HistoryEvent] + def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[str] = ..., entityState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., operationRequests: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ...) -> None: ... + class OperationRequest(_message.Message): __slots__ = ("operation", "requestId", "input") OPERATION_FIELD_NUMBER: _ClassVar[int] @@ -860,26 +973,32 @@ class StartNewOrchestrationAction(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... class GetWorkItemsRequest(_message.Message): - __slots__ = ("maxConcurrentOrchestrationWorkItems", "maxConcurrentActivityWorkItems") + __slots__ = ("maxConcurrentOrchestrationWorkItems", "maxConcurrentActivityWorkItems", "maxConcurrentEntityWorkItems", "capabilities") MAXCONCURRENTORCHESTRATIONWORKITEMS_FIELD_NUMBER: _ClassVar[int] MAXCONCURRENTACTIVITYWORKITEMS_FIELD_NUMBER: _ClassVar[int] + MAXCONCURRENTENTITYWORKITEMS_FIELD_NUMBER: _ClassVar[int] + CAPABILITIES_FIELD_NUMBER: _ClassVar[int] maxConcurrentOrchestrationWorkItems: int maxConcurrentActivityWorkItems: int - def __init__(self, maxConcurrentOrchestrationWorkItems: _Optional[int] = ..., maxConcurrentActivityWorkItems: _Optional[int] = ...) -> None: ... 
+ maxConcurrentEntityWorkItems: int + capabilities: _containers.RepeatedScalarFieldContainer[WorkerCapability] + def __init__(self, maxConcurrentOrchestrationWorkItems: _Optional[int] = ..., maxConcurrentActivityWorkItems: _Optional[int] = ..., maxConcurrentEntityWorkItems: _Optional[int] = ..., capabilities: _Optional[_Iterable[_Union[WorkerCapability, str]]] = ...) -> None: ... class WorkItem(_message.Message): - __slots__ = ("orchestratorRequest", "activityRequest", "entityRequest", "healthPing", "completionToken") + __slots__ = ("orchestratorRequest", "activityRequest", "entityRequest", "healthPing", "entityRequestV2", "completionToken") ORCHESTRATORREQUEST_FIELD_NUMBER: _ClassVar[int] ACTIVITYREQUEST_FIELD_NUMBER: _ClassVar[int] ENTITYREQUEST_FIELD_NUMBER: _ClassVar[int] HEALTHPING_FIELD_NUMBER: _ClassVar[int] + ENTITYREQUESTV2_FIELD_NUMBER: _ClassVar[int] COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] orchestratorRequest: OrchestratorRequest activityRequest: ActivityRequest entityRequest: EntityBatchRequest healthPing: HealthPing + entityRequestV2: EntityRequest completionToken: str - def __init__(self, orchestratorRequest: _Optional[_Union[OrchestratorRequest, _Mapping]] = ..., activityRequest: _Optional[_Union[ActivityRequest, _Mapping]] = ..., entityRequest: _Optional[_Union[EntityBatchRequest, _Mapping]] = ..., healthPing: _Optional[_Union[HealthPing, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... + def __init__(self, orchestratorRequest: _Optional[_Union[OrchestratorRequest, _Mapping]] = ..., activityRequest: _Optional[_Union[ActivityRequest, _Mapping]] = ..., entityRequest: _Optional[_Union[EntityBatchRequest, _Mapping]] = ..., healthPing: _Optional[_Union[HealthPing, _Mapping]] = ..., entityRequestV2: _Optional[_Union[EntityRequest, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... 
class CompleteTaskResponse(_message.Message): __slots__ = () @@ -888,3 +1007,19 @@ class CompleteTaskResponse(_message.Message): class HealthPing(_message.Message): __slots__ = () def __init__(self) -> None: ... + +class StreamInstanceHistoryRequest(_message.Message): + __slots__ = ("instanceId", "executionId", "forWorkItemProcessing") + INSTANCEID_FIELD_NUMBER: _ClassVar[int] + EXECUTIONID_FIELD_NUMBER: _ClassVar[int] + FORWORKITEMPROCESSING_FIELD_NUMBER: _ClassVar[int] + instanceId: str + executionId: _wrappers_pb2.StringValue + forWorkItemProcessing: bool + def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., forWorkItemProcessing: bool = ...) -> None: ... + +class HistoryChunk(_message.Message): + __slots__ = ("events",) + EVENTS_FIELD_NUMBER: _ClassVar[int] + events: _containers.RepeatedCompositeFieldContainer[HistoryEvent] + def __init__(self, events: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ...) -> None: ... 
diff --git a/durabletask/internal/orchestrator_service_pb2_grpc.py b/durabletask/internal/orchestrator_service_pb2_grpc.py index 3638bf6..ea61301 100644 --- a/durabletask/internal/orchestrator_service_pb2_grpc.py +++ b/durabletask/internal/orchestrator_service_pb2_grpc.py @@ -95,6 +95,11 @@ def __init__(self, channel): request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, ) + self.StreamInstanceHistory = channel.unary_stream( + '/TaskHubSidecarService/StreamInstanceHistory', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.StreamInstanceHistoryRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.HistoryChunk.FromString, + ) self.CreateTaskHub = channel.unary_unary( '/TaskHubSidecarService/CreateTaskHub', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, @@ -238,6 +243,13 @@ def CompleteEntityTask(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def StreamInstanceHistory(self, request, context): + """Gets the history of an orchestration instance as a stream of events. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def CreateTaskHub(self, request, context): """Deletes and Creates the necessary resources for the orchestration service and the instance store """ @@ -363,6 +375,11 @@ def add_TaskHubSidecarServiceServicer_to_server(servicer, server): request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.FromString, response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), + 'StreamInstanceHistory': grpc.unary_stream_rpc_method_handler( + servicer.StreamInstanceHistory, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.StreamInstanceHistoryRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.HistoryChunk.SerializeToString, + ), 'CreateTaskHub': grpc.unary_unary_rpc_method_handler( servicer.CreateTaskHub, request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.FromString, @@ -675,6 +692,23 @@ def CompleteEntityTask(request, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod + def StreamInstanceHistory(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/TaskHubSidecarService/StreamInstanceHistory', + durabletask_dot_internal_dot_orchestrator__service__pb2.StreamInstanceHistoryRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.HistoryChunk.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod def CreateTaskHub(request, target, From 
35b4f1c911a34a142c6366bce78060c14d379391 Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Sat, 8 Feb 2025 00:27:40 +0000 Subject: [PATCH 09/81] =?UTF-8?q?Use=20dapr=E2=80=99s=20durabletask-go=20f?= =?UTF-8?q?ork?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Elena Kolevska --- .github/workflows/pr-validation.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 1bf04a8..63540ac 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -44,7 +44,7 @@ jobs: # Install and run the durabletask-go sidecar for running e2e tests - name: Pytest e2e tests run: | - go install github.com/microsoft/durabletask-go@main + go install github.com/dapr/durabletask-go@main durabletask-go --port 4001 & pytest -m "e2e" --verbose publish: From 8802d445fd0fe4435a904480e1b54484700aee11 Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Tue, 11 Feb 2025 16:51:29 +0000 Subject: [PATCH 10/81] Extend the recursive termination test Signed-off-by: Elena Kolevska --- README.md | 5 ++- tests/test_orchestration_e2e.py | 74 ++++++++++++++++++++------------- 2 files changed, 49 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index b11fc29..4a45d9b 100644 --- a/README.md +++ b/README.md @@ -178,10 +178,11 @@ make test-unit ### Running E2E tests -The E2E (end-to-end) tests require a sidecar process to be running. You can use the Dapr sidecar for this or run a Durable Task test sidecar using the following `docker` command: +The E2E (end-to-end) tests require a sidecar process to be running. 
You can use the Dapr sidecar for this or run a Durable Task test sidecar using the following command: ```sh -docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator +go install github.com/dapr/durabletask-go@main +durabletask-go --port 4001 ``` To run the E2E tests, run the following command from the project root: diff --git a/tests/test_orchestration_e2e.py b/tests/test_orchestration_e2e.py index d3d7f0b..8466f49 100644 --- a/tests/test_orchestration_e2e.py +++ b/tests/test_orchestration_e2e.py @@ -279,39 +279,57 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert state.serialized_output == json.dumps("some reason for termination") def test_terminate_recursive(): - def root(ctx: task.OrchestrationContext, _): - result = yield ctx.call_sub_orchestrator(child) - return result - def child(ctx: task.OrchestrationContext, _): - result = yield ctx.wait_for_external_event("my_event") - return result + thread_lock = threading.Lock() + activity_counter = 0 + delay_time = 4 # seconds - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(root) - w.add_orchestrator(child) - w.start() + def increment(ctx, _): + with thread_lock: + nonlocal activity_counter + activity_counter += 1 + raise Exception("Failed: Should not have executed the activity") - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(root) - state = task_hub_client.wait_for_orchestration_start(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.RUNNING + def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int): + due_time = ctx.current_utc_datetime + timedelta(seconds=delay_time) + yield ctx.create_timer(due_time) + yield ctx.call_activity(increment) - # Terminate root orchestration(recursive set to True by 
default) - task_hub_client.terminate_orchestration(id, output="some reason for termination") - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.TERMINATED + def parent_orchestrator(ctx: task.OrchestrationContext, count: int): + tasks = [] + for _ in range(count): + tasks.append(ctx.call_sub_orchestrator(orchestrator_child, input=count)) + yield task.when_all(tasks) - # Verify that child orchestration is also terminated - c = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.TERMINATED + for recurse in [True, False]: + with worker.TaskHubGrpcWorker() as w: + w.add_activity(increment) + w.add_orchestrator(orchestrator_child) + w.add_orchestrator(parent_orchestrator) + w.start() + + task_hub_client = client.TaskHubGrpcClient() + instance_id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=5) + + time.sleep(2) + + output = "Recursive termination = {recurse}" + task_hub_client.terminate_orchestration(instance_id, output=output, recursive=recurse) + + + metadata = task_hub_client.wait_for_orchestration_completion(instance_id, timeout=30) + + assert metadata is not None + assert metadata.runtime_status == client.OrchestrationStatus.TERMINATED + assert metadata.serialized_output == f'"{output}"' + + time.sleep(delay_time) + + if recurse: + assert activity_counter == 0, "Activity should not have executed with recursive termination" + else: + assert metadata is not None + assert activity_counter == 5, "Activity should have executed without recursive termination" - task_hub_client.purge_orchestration(id) - state = task_hub_client.get_orchestration_state(id) - assert state is None def test_continue_as_new(): From 16ca4206b9ea2a2ebe0bfb99f6383de946d5fc16 Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Tue, 11 Feb 2025 17:06:00 +0000 
Subject: [PATCH 11/81] redundant line Signed-off-by: Elena Kolevska --- tests/test_orchestration_e2e.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_orchestration_e2e.py b/tests/test_orchestration_e2e.py index 8466f49..bcb3d3c 100644 --- a/tests/test_orchestration_e2e.py +++ b/tests/test_orchestration_e2e.py @@ -327,7 +327,6 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): if recurse: assert activity_counter == 0, "Activity should not have executed with recursive termination" else: - assert metadata is not None assert activity_counter == 5, "Activity should have executed without recursive termination" From f430bc2ec1dbee488297331743b3ab71e1b92d65 Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Wed, 19 Feb 2025 00:25:40 +0000 Subject: [PATCH 12/81] Removes default timeout for `wait_for_orchestration_start` and `wait_for_orchestration_completion` Signed-off-by: Elena Kolevska --- durabletask/client.py | 16 ++++++----- tests/test_client.py | 63 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 71 insertions(+), 8 deletions(-) diff --git a/durabletask/client.py b/durabletask/client.py index 31953ae..fae968d 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -129,11 +129,13 @@ def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = Tr def wait_for_orchestration_start(self, instance_id: str, *, fetch_payloads: bool = False, - timeout: int = 60) -> Optional[OrchestrationState]: + timeout: int = 0) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: - self._logger.info(f"Waiting up to {timeout}s for instance '{instance_id}' to start.") - res: pb.GetInstanceResponse = self._stub.WaitForInstanceStart(req, timeout=timeout) + grpc_timeout = None if timeout == 0 else timeout + self._logger.info( + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to start.") + res: 
pb.GetInstanceResponse = self._stub.WaitForInstanceStart(req, timeout=grpc_timeout) return new_orchestration_state(req.instanceId, res) except grpc.RpcError as rpc_error: if rpc_error.code() == grpc.StatusCode.DEADLINE_EXCEEDED: # type: ignore @@ -144,11 +146,13 @@ def wait_for_orchestration_start(self, instance_id: str, *, def wait_for_orchestration_completion(self, instance_id: str, *, fetch_payloads: bool = True, - timeout: int = 60) -> Optional[OrchestrationState]: + timeout: int = 0) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: - self._logger.info(f"Waiting {timeout}s for instance '{instance_id}' to complete.") - res: pb.GetInstanceResponse = self._stub.WaitForInstanceCompletion(req, timeout=timeout) + grpc_timeout = None if timeout == 0 else timeout + self._logger.info( + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to complete.") + res: pb.GetInstanceResponse = self._stub.WaitForInstanceCompletion(req, timeout=grpc_timeout) state = new_orchestration_state(req.instanceId, res) if not state: return None diff --git a/tests/test_client.py b/tests/test_client.py index caacf65..5990db0 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,8 +1,10 @@ -from unittest.mock import patch, ANY +from unittest.mock import patch, ANY, Mock +from durabletask.client import TaskHubGrpcClient from durabletask.internal.shared import (DefaultClientInterceptorImpl, get_default_host_address, get_grpc_channel) +import pytest HOST_ADDRESS = 'localhost:50051' METADATA = [('key1', 'value1'), ('key2', 'value2')] @@ -85,4 +87,61 @@ def test_grpc_channel_with_host_name_protocol_stripping(): prefix = "" get_grpc_channel(prefix + host_name, METADATA, True) - mock_secure_channel.assert_called_with(host_name, ANY) \ No newline at end of file + mock_secure_channel.assert_called_with(host_name, ANY) + + +@pytest.mark.parametrize("timeout", [None, 0, 
5]) +def test_wait_for_orchestration_start_timeout(timeout): + instance_id = "test-instance" + + from durabletask.internal.orchestrator_service_pb2 import GetInstanceResponse, \ + OrchestrationState, ORCHESTRATION_STATUS_RUNNING + + response = GetInstanceResponse() + state = OrchestrationState() + state.instanceId = instance_id + state.orchestrationStatus = ORCHESTRATION_STATUS_RUNNING + response.orchestrationState.CopyFrom(state) + + c = TaskHubGrpcClient() + c._stub = Mock() + c._stub.WaitForInstanceStart.return_value = response + + grpc_timeout = None if timeout is None else timeout + c.wait_for_orchestration_start(instance_id, timeout=grpc_timeout) + + # Verify WaitForInstanceStart was called with the expected timeout + c._stub.WaitForInstanceStart.assert_called_once() + _, kwargs = c._stub.WaitForInstanceStart.call_args + if timeout is None or timeout == 0: + assert kwargs.get('timeout') is None + else: + assert kwargs.get('timeout') == timeout + +@pytest.mark.parametrize("timeout", [None, 0, 5]) +def test_wait_for_orchestration_completion_timeout(timeout): + instance_id = "test-instance" + + from durabletask.internal.orchestrator_service_pb2 import GetInstanceResponse, \ + OrchestrationState, ORCHESTRATION_STATUS_COMPLETED + + response = GetInstanceResponse() + state = OrchestrationState() + state.instanceId = instance_id + state.orchestrationStatus = ORCHESTRATION_STATUS_COMPLETED + response.orchestrationState.CopyFrom(state) + + c = TaskHubGrpcClient() + c._stub = Mock() + c._stub.WaitForInstanceCompletion.return_value = response + + grpc_timeout = None if timeout is None else timeout + c.wait_for_orchestration_completion(instance_id, timeout=grpc_timeout) + + # Verify WaitForInstanceCompletion was called with the expected timeout + c._stub.WaitForInstanceCompletion.assert_called_once() + _, kwargs = c._stub.WaitForInstanceCompletion.call_args + if timeout is None or timeout == 0: + assert kwargs.get('timeout') is None + else: + assert kwargs.get('timeout') == timeout \ No
newline at end of file From d53cf5c63569ef27043c68d365e73ebb74f4d304 Mon Sep 17 00:00:00 2001 From: Chris Gillum Date: Mon, 6 Jan 2025 08:35:13 -0800 Subject: [PATCH 13/81] Update version to 0.2b1, require Python 3.9+, and enhance GitHub Actions workflow (#1) (#35) - Bump version in `pyproject.toml` to 0.2b1 and update Python requirement to >=3.9. - Add `protobuf` dependency in `requirements.txt`. - Update GitHub Actions workflow to support Python versions 3.9 to 3.13 and upgrade action versions. - Refactor type hints in various files to use `Optional` and `list` instead of `Union` and `List`. - Improve handling of custom status in orchestration context and related functions. - Fix purge implementation to pass required parameters. Signed-off-by: Albert Callarisa --- .github/workflows/pr-validation.yml | 19 +++++++-- .vscode/settings.json | 5 ++- durabletask/client.py | 40 +++++++++---------- durabletask/internal/grpc_interceptor.py | 3 +- durabletask/internal/helpers.py | 32 +++++++-------- durabletask/internal/shared.py | 15 ++++--- durabletask/task.py | 30 ++++++++------ durabletask/worker.py | 50 ++++++++++++------------ examples/fanout_fanin.py | 7 ++-- pyproject.toml | 4 +- requirements.txt | 1 + tests/test_activity_executor.py | 4 +- tests/test_orchestration_e2e.py | 2 +- tests/test_orchestration_executor.py | 3 +- 14 files changed, 118 insertions(+), 97 deletions(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 4c09e6b..70ff470 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -16,12 +16,12 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - 
name: Install dependencies @@ -35,3 +35,16 @@ jobs: - name: Pytest unit tests run: | pytest -m "not e2e" --verbose + + # Sidecar for running e2e tests requires Go SDK + - name: Install Go SDK + uses: actions/setup-go@v5 + with: + go-version: 'stable' + + # Install and run the durabletask-go sidecar for running e2e tests + - name: Pytest e2e tests + run: | + go install github.com/microsoft/durabletask-go@main + durabletask-go --port 4001 & + pytest -m "e2e" --verbose diff --git a/.vscode/settings.json b/.vscode/settings.json index d737b0b..1c929ac 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,7 +3,7 @@ "editor.defaultFormatter": "ms-python.autopep8", "editor.formatOnSave": true, "editor.codeActionsOnSave": { - "source.organizeImports": true, + "source.organizeImports": "explicit" }, "editor.rulers": [ 119 @@ -29,5 +29,6 @@ "coverage.xml", "jacoco.xml", "coverage.cobertura.xml" - ] + ], + "makefile.configureOnOpen": false } \ No newline at end of file diff --git a/durabletask/client.py b/durabletask/client.py index 82f920a..31953ae 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from datetime import datetime from enum import Enum -from typing import Any, List, Tuple, TypeVar, Union +from typing import Any, Optional, TypeVar, Union import grpc from google.protobuf import wrappers_pb2 @@ -42,10 +42,10 @@ class OrchestrationState: runtime_status: OrchestrationStatus created_at: datetime last_updated_at: datetime - serialized_input: Union[str, None] - serialized_output: Union[str, None] - serialized_custom_status: Union[str, None] - failure_details: Union[task.FailureDetails, None] + serialized_input: Optional[str] + serialized_output: Optional[str] + serialized_custom_status: Optional[str] + failure_details: Optional[task.FailureDetails] def raise_if_failed(self): if self.failure_details is not None: @@ -64,7 +64,7 @@ def failure_details(self): return self._failure_details -def 
new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Union[OrchestrationState, None]: +def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Optional[OrchestrationState]: if not res.exists: return None @@ -92,20 +92,20 @@ def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Un class TaskHubGrpcClient: def __init__(self, *, - host_address: Union[str, None] = None, - metadata: Union[List[Tuple[str, str]], None] = None, - log_handler = None, - log_formatter: Union[logging.Formatter, None] = None, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, secure_channel: bool = False): channel = shared.get_grpc_channel(host_address, metadata, secure_channel=secure_channel) self._stub = stubs.TaskHubSidecarServiceStub(channel) self._logger = shared.get_logger("client", log_handler, log_formatter) def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, - input: Union[TInput, None] = None, - instance_id: Union[str, None] = None, - start_at: Union[datetime, None] = None, - reuse_id_policy: Union[pb.OrchestrationIdReusePolicy, None] = None) -> str: + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + start_at: Optional[datetime] = None, + reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None) -> str: name = orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) @@ -122,14 +122,14 @@ def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInpu res: pb.CreateInstanceResponse = self._stub.StartInstance(req) return res.instanceId - def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> Union[OrchestrationState, None]: + def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> 
Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) res: pb.GetInstanceResponse = self._stub.GetInstance(req) return new_orchestration_state(req.instanceId, res) def wait_for_orchestration_start(self, instance_id: str, *, fetch_payloads: bool = False, - timeout: int = 60) -> Union[OrchestrationState, None]: + timeout: int = 60) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: self._logger.info(f"Waiting up to {timeout}s for instance '{instance_id}' to start.") @@ -144,7 +144,7 @@ def wait_for_orchestration_start(self, instance_id: str, *, def wait_for_orchestration_completion(self, instance_id: str, *, fetch_payloads: bool = True, - timeout: int = 60) -> Union[OrchestrationState, None]: + timeout: int = 60) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: self._logger.info(f"Waiting {timeout}s for instance '{instance_id}' to complete.") @@ -170,7 +170,7 @@ def wait_for_orchestration_completion(self, instance_id: str, *, raise def raise_orchestration_event(self, instance_id: str, event_name: str, *, - data: Union[Any, None] = None): + data: Optional[Any] = None): req = pb.RaiseEventRequest( instanceId=instance_id, name=event_name, @@ -180,7 +180,7 @@ def raise_orchestration_event(self, instance_id: str, event_name: str, *, self._stub.RaiseEvent(req) def terminate_orchestration(self, instance_id: str, *, - output: Union[Any, None] = None, + output: Optional[Any] = None, recursive: bool = True): req = pb.TerminateRequest( instanceId=instance_id, @@ -203,4 +203,4 @@ def resume_orchestration(self, instance_id: str): def purge_orchestration(self, instance_id: str, recursive: bool = True): req = pb.PurgeInstancesRequest(instanceId=instance_id, recursive=recursive) self._logger.info(f"Purging instance '{instance_id}'.") - self._stub.PurgeInstances() + 
self._stub.PurgeInstances(req) diff --git a/durabletask/internal/grpc_interceptor.py b/durabletask/internal/grpc_interceptor.py index 5b12ace..738fca9 100644 --- a/durabletask/internal/grpc_interceptor.py +++ b/durabletask/internal/grpc_interceptor.py @@ -2,7 +2,6 @@ # Licensed under the MIT License. from collections import namedtuple -from typing import List, Tuple import grpc @@ -26,7 +25,7 @@ class DefaultClientInterceptorImpl ( StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an interceptor to add additional headers to all calls as needed.""" - def __init__(self, metadata: List[Tuple[str, str]]): + def __init__(self, metadata: list[tuple[str, str]]): super().__init__() self._metadata = metadata diff --git a/durabletask/internal/helpers.py b/durabletask/internal/helpers.py index c7354e5..6b36586 100644 --- a/durabletask/internal/helpers.py +++ b/durabletask/internal/helpers.py @@ -3,7 +3,7 @@ import traceback from datetime import datetime -from typing import List, Union +from typing import Optional from google.protobuf import timestamp_pb2, wrappers_pb2 @@ -12,14 +12,14 @@ # TODO: The new_xxx_event methods are only used by test code and should be moved elsewhere -def new_orchestrator_started_event(timestamp: Union[datetime, None] = None) -> pb.HistoryEvent: +def new_orchestrator_started_event(timestamp: Optional[datetime] = None) -> pb.HistoryEvent: ts = timestamp_pb2.Timestamp() if timestamp is not None: ts.FromDatetime(timestamp) return pb.HistoryEvent(eventId=-1, timestamp=ts, orchestratorStarted=pb.OrchestratorStartedEvent()) -def new_execution_started_event(name: str, instance_id: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_execution_started_event(name: str, instance_id: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -49,7 +49,7 @@ def new_timer_fired_event(timer_id: int, fire_at: datetime) -> 
pb.HistoryEvent: ) -def new_task_scheduled_event(event_id: int, name: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_task_scheduled_event(event_id: int, name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), @@ -57,7 +57,7 @@ def new_task_scheduled_event(event_id: int, name: str, encoded_input: Union[str, ) -def new_task_completed_event(event_id: int, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_task_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -77,7 +77,7 @@ def new_sub_orchestration_created_event( event_id: int, name: str, instance_id: str, - encoded_input: Union[str, None] = None) -> pb.HistoryEvent: + encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), @@ -88,7 +88,7 @@ def new_sub_orchestration_created_event( ) -def new_sub_orchestration_completed_event(event_id: int, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_sub_orchestration_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -116,7 +116,7 @@ def new_failure_details(ex: Exception) -> pb.TaskFailureDetails: ) -def new_event_raised_event(name: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_event_raised_event(name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -140,7 +140,7 @@ def new_resume_event() -> pb.HistoryEvent: ) -def new_terminated_event(*, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_terminated_event(*, encoded_output: Optional[str] = None) -> pb.HistoryEvent: return 
pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), @@ -150,7 +150,7 @@ def new_terminated_event(*, encoded_output: Union[str, None] = None) -> pb.Histo ) -def get_string_value(val: Union[str, None]) -> Union[wrappers_pb2.StringValue, None]: +def get_string_value(val: Optional[str]) -> Optional[wrappers_pb2.StringValue]: if val is None: return None else: @@ -160,9 +160,9 @@ def get_string_value(val: Union[str, None]) -> Union[wrappers_pb2.StringValue, N def new_complete_orchestration_action( id: int, status: pb.OrchestrationStatus, - result: Union[str, None] = None, - failure_details: Union[pb.TaskFailureDetails, None] = None, - carryover_events: Union[List[pb.HistoryEvent], None] = None) -> pb.OrchestratorAction: + result: Optional[str] = None, + failure_details: Optional[pb.TaskFailureDetails] = None, + carryover_events: Optional[list[pb.HistoryEvent]] = None) -> pb.OrchestratorAction: completeOrchestrationAction = pb.CompleteOrchestrationAction( orchestrationStatus=status, result=get_string_value(result), @@ -178,7 +178,7 @@ def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction return pb.OrchestratorAction(id=id, createTimer=pb.CreateTimerAction(fireAt=timestamp)) -def new_schedule_task_action(id: int, name: str, encoded_input: Union[str, None]) -> pb.OrchestratorAction: +def new_schedule_task_action(id: int, name: str, encoded_input: Optional[str]) -> pb.OrchestratorAction: return pb.OrchestratorAction(id=id, scheduleTask=pb.ScheduleTaskAction( name=name, input=get_string_value(encoded_input) @@ -194,8 +194,8 @@ def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp: def new_create_sub_orchestration_action( id: int, name: str, - instance_id: Union[str, None], - encoded_input: Union[str, None]) -> pb.OrchestratorAction: + instance_id: Optional[str], + encoded_input: Optional[str]) -> pb.OrchestratorAction: return pb.OrchestratorAction(id=id, createSubOrchestration=pb.CreateSubOrchestrationAction( name=name, 
instanceId=instance_id, diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 80c3d56..400529a 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -5,7 +5,7 @@ import json import logging from types import SimpleNamespace -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Optional import grpc @@ -20,7 +20,10 @@ def get_default_host_address() -> str: return "localhost:4001" -def get_grpc_channel(host_address: Union[str, None], metadata: Union[List[Tuple[str, str]], None], secure_channel: bool = False) -> grpc.Channel: +def get_grpc_channel( + host_address: Optional[str], + metadata: Optional[list[tuple[str, str]]], + secure_channel: bool = False) -> grpc.Channel: if host_address is None: host_address = get_default_host_address() @@ -36,8 +39,8 @@ def get_grpc_channel(host_address: Union[str, None], metadata: Union[List[Tuple[ def get_logger( name_suffix: str, - log_handler: Union[logging.Handler, None] = None, - log_formatter: Union[logging.Formatter, None] = None) -> logging.Logger: + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None) -> logging.Logger: logger = logging.Logger(f"durabletask-{name_suffix}") # Add a default log handler if none is provided @@ -78,7 +81,7 @@ def default(self, obj): if dataclasses.is_dataclass(obj): # Dataclasses are not serializable by default, so we convert them to a dict and mark them for # automatic deserialization by the receiver - d = dataclasses.asdict(obj) + d = dataclasses.asdict(obj) # type: ignore d[AUTO_SERIALIZED] = True return d elif isinstance(obj, SimpleNamespace): @@ -94,7 +97,7 @@ class InternalJSONDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): super().__init__(object_hook=self.dict_to_object, *args, **kwargs) - def dict_to_object(self, d: Dict[str, Any]): + def dict_to_object(self, d: dict[str, Any]): # If the object was serialized by the InternalJSONEncoder, deserialize 
it as a SimpleNamespace if d.pop(AUTO_SERIALIZED, False): return SimpleNamespace(**d) diff --git a/durabletask/task.py b/durabletask/task.py index a9f85de..a40602b 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -7,8 +7,7 @@ import math from abc import ABC, abstractmethod from datetime import datetime, timedelta -from typing import (Any, Callable, Generator, Generic, List, Optional, TypeVar, - Union) +from typing import Any, Callable, Generator, Generic, Optional, TypeVar, Union import durabletask.internal.helpers as pbh import durabletask.internal.orchestrator_service_pb2 as pb @@ -72,8 +71,13 @@ def is_replaying(self) -> bool: pass @abstractmethod - def set_custom_status(self, custom_status: str) -> None: - """Set the custom status. + def set_custom_status(self, custom_status: Any) -> None: + """Set the orchestration instance's custom status. + + Parameters + ---------- + custom_status: Any + A JSON-serializable custom status value to set. """ pass @@ -254,9 +258,9 @@ def get_exception(self) -> TaskFailedError: class CompositeTask(Task[T]): """A task that is composed of other tasks.""" - _tasks: List[Task] + _tasks: list[Task] - def __init__(self, tasks: List[Task]): + def __init__(self, tasks: list[Task]): super().__init__() self._tasks = tasks self._completed_tasks = 0 @@ -266,17 +270,17 @@ def __init__(self, tasks: List[Task]): if task.is_complete: self.on_child_completed(task) - def get_tasks(self) -> List[Task]: + def get_tasks(self) -> list[Task]: return self._tasks @abstractmethod def on_child_completed(self, task: Task[T]): pass -class WhenAllTask(CompositeTask[List[T]]): +class WhenAllTask(CompositeTask[list[T]]): """A task that completes when all of its child tasks complete.""" - def __init__(self, tasks: List[Task[T]]): + def __init__(self, tasks: list[Task[T]]): super().__init__(tasks) self._completed_tasks = 0 self._failed_tasks = 0 @@ -340,7 +344,7 @@ def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, def 
increment_attempt_count(self) -> None: self._attempt_count += 1 - def compute_next_delay(self) -> Union[timedelta, None]: + def compute_next_delay(self) -> Optional[timedelta]: if self._attempt_count >= self._retry_policy.max_number_of_attempts: return None @@ -375,7 +379,7 @@ def set_retryable_parent(self, retryable_task: RetryableTask): class WhenAnyTask(CompositeTask[Task]): """A task that completes when any of its child tasks complete.""" - def __init__(self, tasks: List[Task]): + def __init__(self, tasks: list[Task]): super().__init__(tasks) def on_child_completed(self, task: Task): @@ -385,12 +389,12 @@ def on_child_completed(self, task: Task): self._result = task -def when_all(tasks: List[Task[T]]) -> WhenAllTask[T]: +def when_all(tasks: list[Task[T]]) -> WhenAllTask[T]: """Returns a task that completes when all of the provided tasks complete or when one of the tasks fail.""" return WhenAllTask(tasks) -def when_any(tasks: List[Task]) -> WhenAnyTask: +def when_any(tasks: list[Task]) -> WhenAnyTask: """Returns a task that completes when any of the provided tasks complete or fail.""" return WhenAnyTask(tasks) diff --git a/durabletask/worker.py b/durabletask/worker.py index bcc1a30..75e2e37 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -6,8 +6,7 @@ from datetime import datetime, timedelta from threading import Event, Thread from types import GeneratorType -from typing import (Any, Dict, Generator, List, Optional, Sequence, Tuple, - TypeVar, Union) +from typing import Any, Generator, Optional, Sequence, TypeVar, Union import grpc from google.protobuf import empty_pb2, wrappers_pb2 @@ -25,8 +24,8 @@ class _Registry: - orchestrators: Dict[str, task.Orchestrator] - activities: Dict[str, task.Activity] + orchestrators: dict[str, task.Orchestrator] + activities: dict[str, task.Activity] def __init__(self): self.orchestrators = {} @@ -86,7 +85,7 @@ class TaskHubGrpcWorker: def __init__(self, *, host_address: Optional[str] = None, - metadata: 
Optional[List[Tuple[str, str]]] = None, + metadata: Optional[list[tuple[str, str]]] = None, log_handler=None, log_formatter: Optional[logging.Formatter] = None, secure_channel: bool = False): @@ -140,7 +139,7 @@ def run_loop(): # The stream blocks until either a work item is received or the stream is canceled # by another thread (see the stop() method). - for work_item in self._response_stream: + for work_item in self._response_stream: # type: ignore request_type = work_item.WhichOneof('request') self._logger.debug(f'Received "{request_type}" work item') if work_item.HasField('orchestratorRequest'): @@ -189,7 +188,10 @@ def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHub try: executor = _OrchestrationExecutor(self._registry, self._logger) result = executor.execute(req.instanceId, req.pastEvents, req.newEvents) - res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=result.actions, customStatus=wrappers_pb2.StringValue(value=result.custom_status)) + res = pb.OrchestratorResponse( + instanceId=req.instanceId, + actions=result.actions, + customStatus=pbh.get_string_value(result.encoded_custom_status)) except Exception as ex: self._logger.exception(f"An error occurred while trying to execute instance '{req.instanceId}': {ex}") failure_details = pbh.new_failure_details(ex) @@ -232,17 +234,17 @@ def __init__(self, instance_id: str): self._is_replaying = True self._is_complete = False self._result = None - self._pending_actions: Dict[int, pb.OrchestratorAction] = {} - self._pending_tasks: Dict[int, task.CompletableTask] = {} + self._pending_actions: dict[int, pb.OrchestratorAction] = {} + self._pending_tasks: dict[int, task.CompletableTask] = {} self._sequence_number = 0 self._current_utc_datetime = datetime(1000, 1, 1) self._instance_id = instance_id self._completion_status: Optional[pb.OrchestrationStatus] = None - self._received_events: Dict[str, List[Any]] = {} - self._pending_events: Dict[str, List[task.CompletableTask]] = {} + 
self._received_events: dict[str, list[Any]] = {} + self._pending_events: dict[str, list[task.CompletableTask]] = {} self._new_input: Optional[Any] = None self._save_events = False - self._custom_status: str = "" + self._encoded_custom_status: Optional[str] = None def run(self, generator: Generator[task.Task, Any, Any]): self._generator = generator @@ -314,10 +316,10 @@ def set_continued_as_new(self, new_input: Any, save_events: bool): self._new_input = new_input self._save_events = save_events - def get_actions(self) -> List[pb.OrchestratorAction]: + def get_actions(self) -> list[pb.OrchestratorAction]: if self._completion_status == pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW: # When continuing-as-new, we only return a single completion action. - carryover_events: Optional[List[pb.HistoryEvent]] = None + carryover_events: Optional[list[pb.HistoryEvent]] = None if self._save_events: carryover_events = [] # We need to save the current set of pending events so that they can be @@ -356,8 +358,8 @@ def is_replaying(self) -> bool: def current_utc_datetime(self, value: datetime): self._current_utc_datetime = value - def set_custom_status(self, custom_status: str) -> None: - self._custom_status = custom_status + def set_custom_status(self, custom_status: Any) -> None: + self._encoded_custom_status = shared.to_json(custom_status) if custom_status is not None else None def create_timer(self, fire_at: Union[datetime, timedelta]) -> task.Task: return self.create_timer_internal(fire_at) @@ -462,12 +464,12 @@ def continue_as_new(self, new_input, *, save_events: bool = False) -> None: class ExecutionResults: - actions: List[pb.OrchestratorAction] - custom_status: str + actions: list[pb.OrchestratorAction] + encoded_custom_status: Optional[str] - def __init__(self, actions: List[pb.OrchestratorAction], custom_status: str): + def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str]): self.actions = actions - self.custom_status = custom_status + 
self.encoded_custom_status = encoded_custom_status class _OrchestrationExecutor: _generator: Optional[task.Orchestrator] = None @@ -476,7 +478,7 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._registry = registry self._logger = logger self._is_suspended = False - self._suspended_events: List[pb.HistoryEvent] = [] + self._suspended_events: list[pb.HistoryEvent] = [] def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_events: Sequence[pb.HistoryEvent]) -> ExecutionResults: if not new_events: @@ -513,7 +515,7 @@ def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_e actions = ctx.get_actions() if self._logger.level <= logging.DEBUG: self._logger.debug(f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}") - return ExecutionResults(actions=actions, custom_status=ctx._custom_status) + return ExecutionResults(actions=actions, encoded_custom_status=ctx._encoded_custom_status) def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent) -> None: if self._is_suspended and _is_suspendable(event): @@ -829,7 +831,7 @@ def _get_new_event_summary(new_events: Sequence[pb.HistoryEvent]) -> str: elif len(new_events) == 1: return f"[{new_events[0].WhichOneof('eventType')}]" else: - counts: Dict[str, int] = {} + counts: dict[str, int] = {} for event in new_events: event_type = event.WhichOneof('eventType') counts[event_type] = counts.get(event_type, 0) + 1 @@ -843,7 +845,7 @@ def _get_action_summary(new_actions: Sequence[pb.OrchestratorAction]) -> str: elif len(new_actions) == 1: return f"[{new_actions[0].WhichOneof('orchestratorActionType')}]" else: - counts: Dict[str, int] = {} + counts: dict[str, int] = {} for action in new_actions: action_type = action.WhichOneof('orchestratorActionType') counts[action_type] = counts.get(action_type, 0) + 1 diff --git a/examples/fanout_fanin.py b/examples/fanout_fanin.py index 3e054df..c53744f 100644 --- 
a/examples/fanout_fanin.py +++ b/examples/fanout_fanin.py @@ -3,12 +3,11 @@ to complete, and prints an aggregate summary of the outputs.""" import random import time -from typing import List from durabletask import client, task, worker -def get_work_items(ctx: task.ActivityContext, _) -> List[str]: +def get_work_items(ctx: task.ActivityContext, _) -> list[str]: """Activity function that returns a list of work items""" # return a random number of work items count = random.randint(2, 10) @@ -32,11 +31,11 @@ def orchestrator(ctx: task.OrchestrationContext, _): activity functions in parallel, waits for them all to complete, and prints an aggregate summary of the outputs""" - work_items: List[str] = yield ctx.call_activity(get_work_items) + work_items: list[str] = yield ctx.call_activity(get_work_items) # execute the work-items in parallel and wait for them all to return tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items] - results: List[int] = yield task.when_all(tasks) + results: list[int] = yield task.when_all(tasks) # return an aggregate summary of the results return { diff --git a/pyproject.toml b/pyproject.toml index d57957d..577824b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask" -version = "0.1.1-alpha.1" +version = "0.2b1" description = "A Durable Task Client SDK for Python" keywords = [ "durable", @@ -21,7 +21,7 @@ classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", ] -requires-python = ">=3.8" +requires-python = ">=3.9" license = {file = "LICENSE"} readme = "README.md" dependencies = [ diff --git a/requirements.txt b/requirements.txt index 641cee7..af76d88 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ autopep8 grpcio grpcio-tools +protobuf pytest pytest-cov \ No newline at end of file diff --git a/tests/test_activity_executor.py b/tests/test_activity_executor.py index 
b9a4bd4..bfc8eaf 100644 --- a/tests/test_activity_executor.py +++ b/tests/test_activity_executor.py @@ -3,7 +3,7 @@ import json import logging -from typing import Any, Tuple, Union +from typing import Any, Optional, Tuple from durabletask import task, worker @@ -40,7 +40,7 @@ def test_activity(ctx: task.ActivityContext, _): executor, _ = _get_activity_executor(test_activity) - caught_exception: Union[Exception, None] = None + caught_exception: Optional[Exception] = None try: executor.execute(TEST_INSTANCE_ID, "Bogus", TEST_TASK_ID, None) except Exception as ex: diff --git a/tests/test_orchestration_e2e.py b/tests/test_orchestration_e2e.py index 1cfc520..d3d7f0b 100644 --- a/tests/test_orchestration_e2e.py +++ b/tests/test_orchestration_e2e.py @@ -466,4 +466,4 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.COMPLETED assert state.serialized_input is None assert state.serialized_output is None - assert state.serialized_custom_status is "\"foobaz\"" + assert state.serialized_custom_status == "\"foobaz\"" diff --git a/tests/test_orchestration_executor.py b/tests/test_orchestration_executor.py index 95eab0b..cb77c81 100644 --- a/tests/test_orchestration_executor.py +++ b/tests/test_orchestration_executor.py @@ -4,7 +4,6 @@ import json import logging from datetime import datetime, timedelta -from typing import List import pytest @@ -1184,7 +1183,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert str(ex) in complete_action.failureDetails.errorMessage -def get_and_validate_single_complete_orchestration_action(actions: List[pb.OrchestratorAction]) -> pb.CompleteOrchestrationAction: +def get_and_validate_single_complete_orchestration_action(actions: list[pb.OrchestratorAction]) -> pb.CompleteOrchestrationAction: assert len(actions) == 1 assert type(actions[0]) is pb.OrchestratorAction assert actions[0].HasField("completeOrchestration") From 4a303cb5f0ae14ea00c48ccb556d99ca096a5d71 Mon Sep 
17 00:00:00 2001 From: Bernd Verst Date: Wed, 8 Jan 2025 14:51:24 -0800 Subject: [PATCH 14/81] Downgrade required `grpcio` and `protobuf` versions (#36) Signed-off-by: Albert Callarisa --- CHANGELOG.md | 4 + Makefile | 5 +- README.md | 1 + dev-requirements.txt | 1 + durabletask/internal/__init__.py | 0 .../internal/orchestrator_service_pb2.py | 386 +++++----- .../internal/orchestrator_service_pb2_grpc.py | 673 ++++++------------ requirements.txt | 5 +- 8 files changed, 414 insertions(+), 661 deletions(-) create mode 100644 dev-requirements.txt delete mode 100644 durabletask/internal/__init__.py diff --git a/CHANGELOG.md b/CHANGELOG.md index fc4b3d2..a09078d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) +### Changes + +- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries. + ### Updates - Updated `durabletask-protobuf` submodule reference to latest diff --git a/Makefile b/Makefile index 16b883e..68a9b89 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,8 @@ install: python3 -m pip install . gen-proto: -# NOTE: There is currently a hand-edit that we make to the generated orchestrator_service_pb2.py file after it's generated to help resolve import problems. 
- python3 -m grpc_tools.protoc --proto_path=./submodules/durabletask-protobuf/protos --python_out=./durabletask/internal --pyi_out=./durabletask/internal --grpc_python_out=./durabletask/internal orchestrator_service.proto + cp ./submodules/durabletask-protobuf/protos/orchestrator_service.proto durabletask/internal/orchestrator_service.proto + python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. ./durabletask/internal/orchestrator_service.proto + rm durabletask/internal/*.proto .PHONY: init test-unit test-e2e gen-proto install diff --git a/README.md b/README.md index 22b3c44..81b5a54 100644 --- a/README.md +++ b/README.md @@ -170,6 +170,7 @@ git submodule update --init Once the submodule is available, the corresponding source code can be regenerated using the following command from the project root: ```sh +pip3 install -r dev-requirements.txt make gen-proto ``` diff --git a/dev-requirements.txt b/dev-requirements.txt new file mode 100644 index 0000000..119f072 --- /dev/null +++ b/dev-requirements.txt @@ -0,0 +1 @@ +grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 1.26.X is used which has breaking changes for Python diff --git a/durabletask/internal/__init__.py b/durabletask/internal/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py index 6ee3bbb..9c92eac 100644 --- a/durabletask/internal/orchestrator_service_pb2.py +++ b/durabletask/internal/orchestrator_service_pb2.py @@ -1,22 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
-# NO CHECKED-IN PROTOBUF GENCODE -# source: orchestrator_service.proto -# Protobuf Python Version: 5.27.2 +# source: durabletask/internal/orchestrator_service.proto +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 5, - 27, - 2, - '', - 'orchestrator_service.proto' -) # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -28,196 +18,196 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1aorchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 
\x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 
\x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 
\x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 
\x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 
\x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\
x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 
\x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 
\x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 
\x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 
\x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 
\x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 
\x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 
\x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 
\x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 
\n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43lean
EntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'orchestrator_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'durabletask.internal.orchestrator_service_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + _globals['DESCRIPTOR']._options = None _globals['DESCRIPTOR']._serialized_options = b'\n1com.microsoft.durabletask.implementation.protobufZ\020/internal/protos\252\002\036Microsoft.DurableTask.Protobuf' - _globals['_TRACECONTEXT'].fields_by_name['spanID']._loaded_options = None + _globals['_TRACECONTEXT'].fields_by_name['spanID']._options = None _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._loaded_options = None + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._options = None _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=12076 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=12385 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12387 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12452 - _globals['_ORCHESTRATIONINSTANCE']._serialized_start=156 - _globals['_ORCHESTRATIONINSTANCE']._serialized_end=250 - _globals['_ACTIVITYREQUEST']._serialized_start=253 - _globals['_ACTIVITYREQUEST']._serialized_end=490 - _globals['_ACTIVITYRESPONSE']._serialized_start=493 - _globals['_ACTIVITYRESPONSE']._serialized_end=638 - _globals['_TASKFAILUREDETAILS']._serialized_start=641 - _globals['_TASKFAILUREDETAILS']._serialized_end=819 - 
_globals['_PARENTINSTANCEINFO']._serialized_start=822 - _globals['_PARENTINSTANCEINFO']._serialized_end=1013 - _globals['_TRACECONTEXT']._serialized_start=1015 - _globals['_TRACECONTEXT']._serialized_end=1120 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1123 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1515 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1518 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1685 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1687 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1775 - _globals['_TASKSCHEDULEDEVENT']._serialized_start=1778 - _globals['_TASKSCHEDULEDEVENT']._serialized_end=1947 - _globals['_TASKCOMPLETEDEVENT']._serialized_start=1949 - _globals['_TASKCOMPLETEDEVENT']._serialized_end=2040 - _globals['_TASKFAILEDEVENT']._serialized_start=2042 - _globals['_TASKFAILEDEVENT']._serialized_end=2129 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2132 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2339 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2341 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2452 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2454 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2561 - _globals['_TIMERCREATEDEVENT']._serialized_start=2563 - _globals['_TIMERCREATEDEVENT']._serialized_end=2626 - _globals['_TIMERFIREDEVENT']._serialized_start=2628 - _globals['_TIMERFIREDEVENT']._serialized_end=2706 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2708 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2734 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2736 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2764 - _globals['_EVENTSENTEVENT']._serialized_start=2766 - _globals['_EVENTSENTEVENT']._serialized_end=2861 - _globals['_EVENTRAISEDEVENT']._serialized_start=2863 - 
_globals['_EVENTRAISEDEVENT']._serialized_end=2940 - _globals['_GENERICEVENT']._serialized_start=2942 - _globals['_GENERICEVENT']._serialized_end=3000 - _globals['_HISTORYSTATEEVENT']._serialized_start=3002 - _globals['_HISTORYSTATEEVENT']._serialized_end=3070 - _globals['_CONTINUEASNEWEVENT']._serialized_start=3072 - _globals['_CONTINUEASNEWEVENT']._serialized_end=3137 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3139 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3209 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3211 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3279 - _globals['_HISTORYEVENT']._serialized_start=3282 - _globals['_HISTORYEVENT']._serialized_end=4440 - _globals['_SCHEDULETASKACTION']._serialized_start=4442 - _globals['_SCHEDULETASKACTION']._serialized_end=4568 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4571 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4727 - _globals['_CREATETIMERACTION']._serialized_start=4729 - _globals['_CREATETIMERACTION']._serialized_end=4792 - _globals['_SENDEVENTACTION']._serialized_start=4794 - _globals['_SENDEVENTACTION']._serialized_end=4911 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4914 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5222 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5224 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5337 - _globals['_ORCHESTRATORACTION']._serialized_start=5340 - _globals['_ORCHESTRATORACTION']._serialized_end=5718 - _globals['_ORCHESTRATORREQUEST']._serialized_start=5721 - _globals['_ORCHESTRATORREQUEST']._serialized_end=5939 - _globals['_ORCHESTRATORRESPONSE']._serialized_start=5942 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=6074 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=6077 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=6496 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6453 - 
_globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6496 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6498 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6617 - _globals['_CREATEINSTANCERESPONSE']._serialized_start=6619 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=6663 - _globals['_GETINSTANCEREQUEST']._serialized_start=6665 - _globals['_GETINSTANCEREQUEST']._serialized_end=6734 - _globals['_GETINSTANCERESPONSE']._serialized_start=6736 - _globals['_GETINSTANCERESPONSE']._serialized_end=6822 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=6824 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=6913 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=6915 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=6939 - _globals['_ORCHESTRATIONSTATE']._serialized_start=6942 - _globals['_ORCHESTRATIONSTATE']._serialized_end=7618 - _globals['_RAISEEVENTREQUEST']._serialized_start=7620 - _globals['_RAISEEVENTREQUEST']._serialized_end=7718 - _globals['_RAISEEVENTRESPONSE']._serialized_start=7720 - _globals['_RAISEEVENTRESPONSE']._serialized_end=7740 - _globals['_TERMINATEREQUEST']._serialized_start=7742 - _globals['_TERMINATEREQUEST']._serialized_end=7845 - _globals['_TERMINATERESPONSE']._serialized_start=7847 - _globals['_TERMINATERESPONSE']._serialized_end=7866 - _globals['_SUSPENDREQUEST']._serialized_start=7868 - _globals['_SUSPENDREQUEST']._serialized_end=7950 - _globals['_SUSPENDRESPONSE']._serialized_start=7952 - _globals['_SUSPENDRESPONSE']._serialized_end=7969 - _globals['_RESUMEREQUEST']._serialized_start=7971 - _globals['_RESUMEREQUEST']._serialized_end=8052 - _globals['_RESUMERESPONSE']._serialized_start=8054 - _globals['_RESUMERESPONSE']._serialized_end=8070 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=8072 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=8126 - _globals['_INSTANCEQUERY']._serialized_start=8129 - _globals['_INSTANCEQUERY']._serialized_end=8515 - 
_globals['_QUERYINSTANCESRESPONSE']._serialized_start=8518 - _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8648 - _globals['_PURGEINSTANCESREQUEST']._serialized_start=8651 - _globals['_PURGEINSTANCESREQUEST']._serialized_end=8779 - _globals['_PURGEINSTANCEFILTER']._serialized_start=8782 - _globals['_PURGEINSTANCEFILTER']._serialized_end=8952 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8954 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9008 - _globals['_CREATETASKHUBREQUEST']._serialized_start=9010 - _globals['_CREATETASKHUBREQUEST']._serialized_end=9058 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=9060 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=9083 - _globals['_DELETETASKHUBREQUEST']._serialized_start=9085 - _globals['_DELETETASKHUBREQUEST']._serialized_end=9107 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=9109 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=9132 - _globals['_SIGNALENTITYREQUEST']._serialized_start=9135 - _globals['_SIGNALENTITYREQUEST']._serialized_end=9305 - _globals['_SIGNALENTITYRESPONSE']._serialized_start=9307 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=9329 - _globals['_GETENTITYREQUEST']._serialized_start=9331 - _globals['_GETENTITYREQUEST']._serialized_end=9391 - _globals['_GETENTITYRESPONSE']._serialized_start=9393 - _globals['_GETENTITYRESPONSE']._serialized_end=9461 - _globals['_ENTITYQUERY']._serialized_start=9464 - _globals['_ENTITYQUERY']._serialized_end=9795 - _globals['_QUERYENTITIESREQUEST']._serialized_start=9797 - _globals['_QUERYENTITIESREQUEST']._serialized_end=9848 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=9850 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=9965 - _globals['_ENTITYMETADATA']._serialized_start=9968 - _globals['_ENTITYMETADATA']._serialized_end=10187 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10190 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10333 - 
_globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10336 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10482 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10484 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10577 - _globals['_ENTITYBATCHREQUEST']._serialized_start=10580 - _globals['_ENTITYBATCHREQUEST']._serialized_end=10710 - _globals['_ENTITYBATCHRESULT']._serialized_start=10713 - _globals['_ENTITYBATCHRESULT']._serialized_end=10898 - _globals['_OPERATIONREQUEST']._serialized_start=10900 - _globals['_OPERATIONREQUEST']._serialized_end=11001 - _globals['_OPERATIONRESULT']._serialized_start=11003 - _globals['_OPERATIONRESULT']._serialized_end=11122 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11124 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11194 - _globals['_OPERATIONRESULTFAILURE']._serialized_start=11196 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=11265 - _globals['_OPERATIONACTION']._serialized_start=11268 - _globals['_OPERATIONACTION']._serialized_end=11424 - _globals['_SENDSIGNALACTION']._serialized_start=11427 - _globals['_SENDSIGNALACTION']._serialized_end=11575 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11578 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11784 - _globals['_GETWORKITEMSREQUEST']._serialized_start=11786 - _globals['_GETWORKITEMSREQUEST']._serialized_end=11807 - _globals['_WORKITEM']._serialized_start=11810 - _globals['_WORKITEM']._serialized_end=12035 - _globals['_COMPLETETASKRESPONSE']._serialized_start=12037 - _globals['_COMPLETETASKRESPONSE']._serialized_end=12059 - _globals['_HEALTHPING']._serialized_start=12061 - _globals['_HEALTHPING']._serialized_end=12073 - _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12455 - _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13859 + _globals['_ORCHESTRATIONSTATUS']._serialized_start=12097 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=12406 + 
_globals['_CREATEORCHESTRATIONACTION']._serialized_start=12408 + _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12473 + _globals['_ORCHESTRATIONINSTANCE']._serialized_start=177 + _globals['_ORCHESTRATIONINSTANCE']._serialized_end=271 + _globals['_ACTIVITYREQUEST']._serialized_start=274 + _globals['_ACTIVITYREQUEST']._serialized_end=511 + _globals['_ACTIVITYRESPONSE']._serialized_start=514 + _globals['_ACTIVITYRESPONSE']._serialized_end=659 + _globals['_TASKFAILUREDETAILS']._serialized_start=662 + _globals['_TASKFAILUREDETAILS']._serialized_end=840 + _globals['_PARENTINSTANCEINFO']._serialized_start=843 + _globals['_PARENTINSTANCEINFO']._serialized_end=1034 + _globals['_TRACECONTEXT']._serialized_start=1036 + _globals['_TRACECONTEXT']._serialized_end=1141 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1144 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1536 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1539 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1706 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1708 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1796 + _globals['_TASKSCHEDULEDEVENT']._serialized_start=1799 + _globals['_TASKSCHEDULEDEVENT']._serialized_end=1968 + _globals['_TASKCOMPLETEDEVENT']._serialized_start=1970 + _globals['_TASKCOMPLETEDEVENT']._serialized_end=2061 + _globals['_TASKFAILEDEVENT']._serialized_start=2063 + _globals['_TASKFAILEDEVENT']._serialized_end=2150 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2153 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2360 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2362 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2473 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2475 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2582 + _globals['_TIMERCREATEDEVENT']._serialized_start=2584 + 
_globals['_TIMERCREATEDEVENT']._serialized_end=2647 + _globals['_TIMERFIREDEVENT']._serialized_start=2649 + _globals['_TIMERFIREDEVENT']._serialized_end=2727 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2729 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2755 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2757 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2785 + _globals['_EVENTSENTEVENT']._serialized_start=2787 + _globals['_EVENTSENTEVENT']._serialized_end=2882 + _globals['_EVENTRAISEDEVENT']._serialized_start=2884 + _globals['_EVENTRAISEDEVENT']._serialized_end=2961 + _globals['_GENERICEVENT']._serialized_start=2963 + _globals['_GENERICEVENT']._serialized_end=3021 + _globals['_HISTORYSTATEEVENT']._serialized_start=3023 + _globals['_HISTORYSTATEEVENT']._serialized_end=3091 + _globals['_CONTINUEASNEWEVENT']._serialized_start=3093 + _globals['_CONTINUEASNEWEVENT']._serialized_end=3158 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3160 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3230 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3232 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3300 + _globals['_HISTORYEVENT']._serialized_start=3303 + _globals['_HISTORYEVENT']._serialized_end=4461 + _globals['_SCHEDULETASKACTION']._serialized_start=4463 + _globals['_SCHEDULETASKACTION']._serialized_end=4589 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4592 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4748 + _globals['_CREATETIMERACTION']._serialized_start=4750 + _globals['_CREATETIMERACTION']._serialized_end=4813 + _globals['_SENDEVENTACTION']._serialized_start=4815 + _globals['_SENDEVENTACTION']._serialized_end=4932 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4935 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5243 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5245 + 
_globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5358 + _globals['_ORCHESTRATORACTION']._serialized_start=5361 + _globals['_ORCHESTRATORACTION']._serialized_end=5739 + _globals['_ORCHESTRATORREQUEST']._serialized_start=5742 + _globals['_ORCHESTRATORREQUEST']._serialized_end=5960 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=5963 + _globals['_ORCHESTRATORRESPONSE']._serialized_end=6095 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=6098 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=6517 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6474 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6517 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6519 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6638 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=6640 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=6684 + _globals['_GETINSTANCEREQUEST']._serialized_start=6686 + _globals['_GETINSTANCEREQUEST']._serialized_end=6755 + _globals['_GETINSTANCERESPONSE']._serialized_start=6757 + _globals['_GETINSTANCERESPONSE']._serialized_end=6843 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=6845 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=6934 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=6936 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=6960 + _globals['_ORCHESTRATIONSTATE']._serialized_start=6963 + _globals['_ORCHESTRATIONSTATE']._serialized_end=7639 + _globals['_RAISEEVENTREQUEST']._serialized_start=7641 + _globals['_RAISEEVENTREQUEST']._serialized_end=7739 + _globals['_RAISEEVENTRESPONSE']._serialized_start=7741 + _globals['_RAISEEVENTRESPONSE']._serialized_end=7761 + _globals['_TERMINATEREQUEST']._serialized_start=7763 + _globals['_TERMINATEREQUEST']._serialized_end=7866 + _globals['_TERMINATERESPONSE']._serialized_start=7868 + _globals['_TERMINATERESPONSE']._serialized_end=7887 + _globals['_SUSPENDREQUEST']._serialized_start=7889 + 
_globals['_SUSPENDREQUEST']._serialized_end=7971 + _globals['_SUSPENDRESPONSE']._serialized_start=7973 + _globals['_SUSPENDRESPONSE']._serialized_end=7990 + _globals['_RESUMEREQUEST']._serialized_start=7992 + _globals['_RESUMEREQUEST']._serialized_end=8073 + _globals['_RESUMERESPONSE']._serialized_start=8075 + _globals['_RESUMERESPONSE']._serialized_end=8091 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=8093 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=8147 + _globals['_INSTANCEQUERY']._serialized_start=8150 + _globals['_INSTANCEQUERY']._serialized_end=8536 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8539 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8669 + _globals['_PURGEINSTANCESREQUEST']._serialized_start=8672 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=8800 + _globals['_PURGEINSTANCEFILTER']._serialized_start=8803 + _globals['_PURGEINSTANCEFILTER']._serialized_end=8973 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8975 + _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9029 + _globals['_CREATETASKHUBREQUEST']._serialized_start=9031 + _globals['_CREATETASKHUBREQUEST']._serialized_end=9079 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=9081 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=9104 + _globals['_DELETETASKHUBREQUEST']._serialized_start=9106 + _globals['_DELETETASKHUBREQUEST']._serialized_end=9128 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=9130 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=9153 + _globals['_SIGNALENTITYREQUEST']._serialized_start=9156 + _globals['_SIGNALENTITYREQUEST']._serialized_end=9326 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=9328 + _globals['_SIGNALENTITYRESPONSE']._serialized_end=9350 + _globals['_GETENTITYREQUEST']._serialized_start=9352 + _globals['_GETENTITYREQUEST']._serialized_end=9412 + _globals['_GETENTITYRESPONSE']._serialized_start=9414 + _globals['_GETENTITYRESPONSE']._serialized_end=9482 + 
_globals['_ENTITYQUERY']._serialized_start=9485 + _globals['_ENTITYQUERY']._serialized_end=9816 + _globals['_QUERYENTITIESREQUEST']._serialized_start=9818 + _globals['_QUERYENTITIESREQUEST']._serialized_end=9869 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=9871 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=9986 + _globals['_ENTITYMETADATA']._serialized_start=9989 + _globals['_ENTITYMETADATA']._serialized_end=10208 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10211 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10354 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10357 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10503 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10505 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10598 + _globals['_ENTITYBATCHREQUEST']._serialized_start=10601 + _globals['_ENTITYBATCHREQUEST']._serialized_end=10731 + _globals['_ENTITYBATCHRESULT']._serialized_start=10734 + _globals['_ENTITYBATCHRESULT']._serialized_end=10919 + _globals['_OPERATIONREQUEST']._serialized_start=10921 + _globals['_OPERATIONREQUEST']._serialized_end=11022 + _globals['_OPERATIONRESULT']._serialized_start=11024 + _globals['_OPERATIONRESULT']._serialized_end=11143 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11145 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11215 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=11217 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=11286 + _globals['_OPERATIONACTION']._serialized_start=11289 + _globals['_OPERATIONACTION']._serialized_end=11445 + _globals['_SENDSIGNALACTION']._serialized_start=11448 + _globals['_SENDSIGNALACTION']._serialized_end=11596 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11599 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11805 + _globals['_GETWORKITEMSREQUEST']._serialized_start=11807 + _globals['_GETWORKITEMSREQUEST']._serialized_end=11828 + 
_globals['_WORKITEM']._serialized_start=11831 + _globals['_WORKITEM']._serialized_end=12056 + _globals['_COMPLETETASKRESPONSE']._serialized_start=12058 + _globals['_COMPLETETASKRESPONSE']._serialized_end=12080 + _globals['_HEALTHPING']._serialized_start=12082 + _globals['_HEALTHPING']._serialized_end=12094 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12476 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13880 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2_grpc.py b/durabletask/internal/orchestrator_service_pb2_grpc.py index f11cf4b..3638bf6 100644 --- a/durabletask/internal/orchestrator_service_pb2_grpc.py +++ b/durabletask/internal/orchestrator_service_pb2_grpc.py @@ -1,32 +1,10 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc -import warnings +from durabletask.internal import orchestrator_service_pb2 as durabletask_dot_internal_dot_orchestrator__service__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -# TODO: This is a manual edit. Need to figure out how to not manually edit this file. -import durabletask.internal.orchestrator_service_pb2 as orchestrator__service__pb2 - -GRPC_GENERATED_VERSION = '1.67.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in orchestrator_service_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' - + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
- ) - class TaskHubSidecarServiceStub(object): """Missing associated documentation comment in .proto file.""" @@ -41,112 +19,112 @@ def __init__(self, channel): '/TaskHubSidecarService/Hello', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - _registered_method=True) + ) self.StartInstance = channel.unary_unary( '/TaskHubSidecarService/StartInstance', - request_serializer=orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CreateInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, + ) self.GetInstance = channel.unary_unary( '/TaskHubSidecarService/GetInstance', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + ) self.RewindInstance = channel.unary_unary( '/TaskHubSidecarService/RewindInstance', - request_serializer=orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.RewindInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, + ) self.WaitForInstanceStart = channel.unary_unary( 
'/TaskHubSidecarService/WaitForInstanceStart', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + ) self.WaitForInstanceCompletion = channel.unary_unary( '/TaskHubSidecarService/WaitForInstanceCompletion', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + ) self.RaiseEvent = channel.unary_unary( '/TaskHubSidecarService/RaiseEvent', - request_serializer=orchestrator__service__pb2.RaiseEventRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.RaiseEventResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, + ) self.TerminateInstance = channel.unary_unary( '/TaskHubSidecarService/TerminateInstance', - request_serializer=orchestrator__service__pb2.TerminateRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.TerminateResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, + 
response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, + ) self.SuspendInstance = channel.unary_unary( '/TaskHubSidecarService/SuspendInstance', - request_serializer=orchestrator__service__pb2.SuspendRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.SuspendResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, + ) self.ResumeInstance = channel.unary_unary( '/TaskHubSidecarService/ResumeInstance', - request_serializer=orchestrator__service__pb2.ResumeRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.ResumeResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, + ) self.QueryInstances = channel.unary_unary( '/TaskHubSidecarService/QueryInstances', - request_serializer=orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.QueryInstancesResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, + ) self.PurgeInstances = channel.unary_unary( '/TaskHubSidecarService/PurgeInstances', - request_serializer=orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.PurgeInstancesResponse.FromString, - _registered_method=True) + 
request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, + ) self.GetWorkItems = channel.unary_stream( '/TaskHubSidecarService/GetWorkItems', - request_serializer=orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.WorkItem.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, + ) self.CompleteActivityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteActivityTask', - request_serializer=orchestrator__service__pb2.ActivityResponse.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + ) self.CompleteOrchestratorTask = channel.unary_unary( '/TaskHubSidecarService/CompleteOrchestratorTask', - request_serializer=orchestrator__service__pb2.OrchestratorResponse.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + ) self.CompleteEntityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteEntityTask', - request_serializer=orchestrator__service__pb2.EntityBatchResult.SerializeToString, - 
response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + ) self.CreateTaskHub = channel.unary_unary( '/TaskHubSidecarService/CreateTaskHub', - request_serializer=orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CreateTaskHubResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, + ) self.DeleteTaskHub = channel.unary_unary( '/TaskHubSidecarService/DeleteTaskHub', - request_serializer=orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.DeleteTaskHubResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, + ) self.SignalEntity = channel.unary_unary( '/TaskHubSidecarService/SignalEntity', - request_serializer=orchestrator__service__pb2.SignalEntityRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.SignalEntityResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, + ) self.GetEntity = channel.unary_unary( '/TaskHubSidecarService/GetEntity', - 
request_serializer=orchestrator__service__pb2.GetEntityRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetEntityResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, + ) self.QueryEntities = channel.unary_unary( '/TaskHubSidecarService/QueryEntities', - request_serializer=orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.QueryEntitiesResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, + ) self.CleanEntityStorage = channel.unary_unary( '/TaskHubSidecarService/CleanEntityStorage', - request_serializer=orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CleanEntityStorageResponse.FromString, - _registered_method=True) + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + ) class TaskHubSidecarServiceServicer(object): @@ -312,114 +290,113 @@ def add_TaskHubSidecarServiceServicer_to_server(servicer, server): ), 'StartInstance': grpc.unary_unary_rpc_method_handler( servicer.StartInstance, - request_deserializer=orchestrator__service__pb2.CreateInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.CreateInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.FromString, + 
response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.SerializeToString, ), 'GetInstance': grpc.unary_unary_rpc_method_handler( servicer.GetInstance, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'RewindInstance': grpc.unary_unary_rpc_method_handler( servicer.RewindInstance, - request_deserializer=orchestrator__service__pb2.RewindInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.RewindInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.SerializeToString, ), 'WaitForInstanceStart': grpc.unary_unary_rpc_method_handler( servicer.WaitForInstanceStart, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'WaitForInstanceCompletion': grpc.unary_unary_rpc_method_handler( servicer.WaitForInstanceCompletion, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + 
response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'RaiseEvent': grpc.unary_unary_rpc_method_handler( servicer.RaiseEvent, - request_deserializer=orchestrator__service__pb2.RaiseEventRequest.FromString, - response_serializer=orchestrator__service__pb2.RaiseEventResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.SerializeToString, ), 'TerminateInstance': grpc.unary_unary_rpc_method_handler( servicer.TerminateInstance, - request_deserializer=orchestrator__service__pb2.TerminateRequest.FromString, - response_serializer=orchestrator__service__pb2.TerminateResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.SerializeToString, ), 'SuspendInstance': grpc.unary_unary_rpc_method_handler( servicer.SuspendInstance, - request_deserializer=orchestrator__service__pb2.SuspendRequest.FromString, - response_serializer=orchestrator__service__pb2.SuspendResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.SerializeToString, ), 'ResumeInstance': grpc.unary_unary_rpc_method_handler( servicer.ResumeInstance, - request_deserializer=orchestrator__service__pb2.ResumeRequest.FromString, - response_serializer=orchestrator__service__pb2.ResumeResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.SerializeToString, ), 
'QueryInstances': grpc.unary_unary_rpc_method_handler( servicer.QueryInstances, - request_deserializer=orchestrator__service__pb2.QueryInstancesRequest.FromString, - response_serializer=orchestrator__service__pb2.QueryInstancesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.SerializeToString, ), 'PurgeInstances': grpc.unary_unary_rpc_method_handler( servicer.PurgeInstances, - request_deserializer=orchestrator__service__pb2.PurgeInstancesRequest.FromString, - response_serializer=orchestrator__service__pb2.PurgeInstancesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.SerializeToString, ), 'GetWorkItems': grpc.unary_stream_rpc_method_handler( servicer.GetWorkItems, - request_deserializer=orchestrator__service__pb2.GetWorkItemsRequest.FromString, - response_serializer=orchestrator__service__pb2.WorkItem.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.SerializeToString, ), 'CompleteActivityTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteActivityTask, - request_deserializer=orchestrator__service__pb2.ActivityResponse.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CompleteOrchestratorTask': grpc.unary_unary_rpc_method_handler( 
servicer.CompleteOrchestratorTask, - request_deserializer=orchestrator__service__pb2.OrchestratorResponse.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CompleteEntityTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteEntityTask, - request_deserializer=orchestrator__service__pb2.EntityBatchResult.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CreateTaskHub': grpc.unary_unary_rpc_method_handler( servicer.CreateTaskHub, - request_deserializer=orchestrator__service__pb2.CreateTaskHubRequest.FromString, - response_serializer=orchestrator__service__pb2.CreateTaskHubResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.SerializeToString, ), 'DeleteTaskHub': grpc.unary_unary_rpc_method_handler( servicer.DeleteTaskHub, - request_deserializer=orchestrator__service__pb2.DeleteTaskHubRequest.FromString, - response_serializer=orchestrator__service__pb2.DeleteTaskHubResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.SerializeToString, ), 'SignalEntity': grpc.unary_unary_rpc_method_handler( servicer.SignalEntity, - 
request_deserializer=orchestrator__service__pb2.SignalEntityRequest.FromString, - response_serializer=orchestrator__service__pb2.SignalEntityResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.SerializeToString, ), 'GetEntity': grpc.unary_unary_rpc_method_handler( servicer.GetEntity, - request_deserializer=orchestrator__service__pb2.GetEntityRequest.FromString, - response_serializer=orchestrator__service__pb2.GetEntityResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.SerializeToString, ), 'QueryEntities': grpc.unary_unary_rpc_method_handler( servicer.QueryEntities, - request_deserializer=orchestrator__service__pb2.QueryEntitiesRequest.FromString, - response_serializer=orchestrator__service__pb2.QueryEntitiesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.SerializeToString, ), 'CleanEntityStorage': grpc.unary_unary_rpc_method_handler( servicer.CleanEntityStorage, - request_deserializer=orchestrator__service__pb2.CleanEntityStorageRequest.FromString, - response_serializer=orchestrator__service__pb2.CleanEntityStorageResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'TaskHubSidecarService', rpc_method_handlers) 
server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('TaskHubSidecarService', rpc_method_handlers) # This class is part of an EXPERIMENTAL API. @@ -437,21 +414,11 @@ def Hello(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/Hello', + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/Hello', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def StartInstance(request, @@ -464,21 +431,11 @@ def StartInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/StartInstance', - orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, - orchestrator__service__pb2.CreateInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/StartInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetInstance(request, @@ -491,21 +448,11 @@ def GetInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/GetInstance', - 
orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/GetInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def RewindInstance(request, @@ -518,21 +465,11 @@ def RewindInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/RewindInstance', - orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, - orchestrator__service__pb2.RewindInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/RewindInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def WaitForInstanceStart(request, @@ -545,21 +482,11 @@ def WaitForInstanceStart(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/WaitForInstanceStart', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, - options, - 
channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/WaitForInstanceStart', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def WaitForInstanceCompletion(request, @@ -572,21 +499,11 @@ def WaitForInstanceCompletion(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/WaitForInstanceCompletion', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/WaitForInstanceCompletion', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def RaiseEvent(request, @@ -599,21 +516,11 @@ def RaiseEvent(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/RaiseEvent', - orchestrator__service__pb2.RaiseEventRequest.SerializeToString, - orchestrator__service__pb2.RaiseEventResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - 
_registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/RaiseEvent', + durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def TerminateInstance(request, @@ -626,21 +533,11 @@ def TerminateInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/TerminateInstance', - orchestrator__service__pb2.TerminateRequest.SerializeToString, - orchestrator__service__pb2.TerminateResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/TerminateInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def SuspendInstance(request, @@ -653,21 +550,11 @@ def SuspendInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/SuspendInstance', - orchestrator__service__pb2.SuspendRequest.SerializeToString, - orchestrator__service__pb2.SuspendResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/SuspendInstance', + 
durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ResumeInstance(request, @@ -680,21 +567,11 @@ def ResumeInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/ResumeInstance', - orchestrator__service__pb2.ResumeRequest.SerializeToString, - orchestrator__service__pb2.ResumeResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/ResumeInstance', + durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def QueryInstances(request, @@ -707,21 +584,11 @@ def QueryInstances(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/QueryInstances', - orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, - orchestrator__service__pb2.QueryInstancesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/QueryInstances', + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, + 
durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def PurgeInstances(request, @@ -734,21 +601,11 @@ def PurgeInstances(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/PurgeInstances', - orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, - orchestrator__service__pb2.PurgeInstancesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/PurgeInstances', + durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetWorkItems(request, @@ -761,21 +618,11 @@ def GetWorkItems(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/TaskHubSidecarService/GetWorkItems', - orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, - orchestrator__service__pb2.WorkItem.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_stream(request, target, '/TaskHubSidecarService/GetWorkItems', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, + options, channel_credentials, + insecure, call_credentials, compression, 
wait_for_ready, timeout, metadata) @staticmethod def CompleteActivityTask(request, @@ -788,21 +635,11 @@ def CompleteActivityTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CompleteActivityTask', - orchestrator__service__pb2.ActivityResponse.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteActivityTask', + durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CompleteOrchestratorTask(request, @@ -815,21 +652,11 @@ def CompleteOrchestratorTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CompleteOrchestratorTask', - orchestrator__service__pb2.OrchestratorResponse.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteOrchestratorTask', + durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CompleteEntityTask(request, @@ -842,21 
+669,11 @@ def CompleteEntityTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CompleteEntityTask', - orchestrator__service__pb2.EntityBatchResult.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteEntityTask', + durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CreateTaskHub(request, @@ -869,21 +686,11 @@ def CreateTaskHub(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CreateTaskHub', - orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, - orchestrator__service__pb2.CreateTaskHubResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CreateTaskHub', + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteTaskHub(request, @@ -896,21 +703,11 @@ def DeleteTaskHub(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, 
- '/TaskHubSidecarService/DeleteTaskHub', - orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, - orchestrator__service__pb2.DeleteTaskHubResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/DeleteTaskHub', + durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def SignalEntity(request, @@ -923,21 +720,11 @@ def SignalEntity(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/SignalEntity', - orchestrator__service__pb2.SignalEntityRequest.SerializeToString, - orchestrator__service__pb2.SignalEntityResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/SignalEntity', + durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetEntity(request, @@ -950,21 +737,11 @@ def GetEntity(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/GetEntity', - orchestrator__service__pb2.GetEntityRequest.SerializeToString, - orchestrator__service__pb2.GetEntityResponse.FromString, - options, 
- channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/GetEntity', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def QueryEntities(request, @@ -977,21 +754,11 @@ def QueryEntities(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/QueryEntities', - orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, - orchestrator__service__pb2.QueryEntitiesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/QueryEntities', + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CleanEntityStorage(request, @@ -1004,18 +771,8 @@ def CleanEntityStorage(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/TaskHubSidecarService/CleanEntityStorage', - orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, - orchestrator__service__pb2.CleanEntityStorageResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) + 
return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CleanEntityStorage', + durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/requirements.txt b/requirements.txt index af76d88..a31419b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ autopep8 -grpcio -grpcio-tools +grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible protobuf pytest -pytest-cov \ No newline at end of file +pytest-cov From 2466e7d1a859a06e3ee26ccb293bbbac03369730 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Fri, 10 Jan 2025 13:01:15 -0800 Subject: [PATCH 15/81] Remove protocol prefix from host name and auto-configure secure mode (#38) Signed-off-by: Albert Callarisa --- CHANGELOG.md | 3 ++- README.md | 2 +- durabletask/internal/shared.py | 17 ++++++++++++ tests/test_client.py | 49 +++++++++++++++++++++++++++++++++- 4 files changed, 68 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a09078d..286312c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changes -- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries. +- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. 
This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries ([#36](https://github.com/microsoft/durabletask-python/pull/36)) - by [@berndverst](https://github.com/berndverst) +- Http and grpc protocols and their secure variants are stripped from the host name parameter if provided. Secure mode is enabled if the protocol provided is https or grpcs ([#38](https://github.com/microsoft/durabletask-python/pull/38) - by [@berndverst)(https://github.com/berndverst) ### Updates diff --git a/README.md b/README.md index 81b5a54..420d75f 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ Orchestrations can specify retry policies for activities and sub-orchestrations. ### Prerequisites -- Python 3.8 +- Python 3.9 - A Durable Task-compatible sidecar, like [Dapr Workflow](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/) ### Installing the Durable Task Python client SDK diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 400529a..c4f3aa4 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -15,6 +15,9 @@ # and should be deserialized as a SimpleNamespace AUTO_SERIALIZED = "__durabletask_autoobject__" +SECURE_PROTOCOLS = ["https://", "grpcs://"] +INSECURE_PROTOCOLS = ["http://", "grpc://"] + def get_default_host_address() -> str: return "localhost:4001" @@ -27,6 +30,20 @@ def get_grpc_channel( if host_address is None: host_address = get_default_host_address() + for protocol in SECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = True + # remove the protocol from the host name + host_address = host_address[len(protocol):] + break + + for protocol in INSECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = False + # remove the protocol from the host name + host_address = host_address[len(protocol):] + break + if secure_channel: channel = 
grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) else: diff --git a/tests/test_client.py b/tests/test_client.py index b27f8e3..caacf65 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,4 +1,4 @@ -from unittest.mock import patch +from unittest.mock import patch, ANY from durabletask.internal.shared import (DefaultClientInterceptorImpl, get_default_host_address, @@ -39,3 +39,50 @@ def test_get_grpc_channel_with_metadata(): assert args[0] == mock_channel.return_value assert isinstance(args[1], DefaultClientInterceptorImpl) assert args[1]._metadata == METADATA + + +def test_grpc_channel_with_host_name_protocol_stripping(): + with patch('grpc.insecure_channel') as mock_insecure_channel, patch( + 'grpc.secure_channel') as mock_secure_channel: + + host_name = "myserver.com:1234" + + prefix = "grpc://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "http://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "HTTP://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "GRPC://" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "" + get_grpc_channel(prefix + host_name, METADATA) + mock_insecure_channel.assert_called_with(host_name) + + prefix = "grpcs://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "https://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "HTTPS://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "GRPCS://" + get_grpc_channel(prefix + host_name, METADATA) + mock_secure_channel.assert_called_with(host_name, ANY) + + prefix = "" + 
get_grpc_channel(prefix + host_name, METADATA, True) + mock_secure_channel.assert_called_with(host_name, ANY) \ No newline at end of file From 551cb02757918e0a4a47d572edd5723bcc20b8c4 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Fri, 17 Jan 2025 09:39:03 -0800 Subject: [PATCH 16/81] Improve Proto Generation: Download proto file directly instead of via submodule (#39) Signed-off-by: Albert Callarisa --- CHANGELOG.md | 1 + Makefile | 3 +- README.md | 10 +- durabletask/internal/PROTO_SOURCE_COMMIT_HASH | 1 + .../internal/orchestrator_service_pb2.py | 352 +++++++++--------- .../internal/orchestrator_service_pb2.pyi | 20 +- submodules/durabletask-protobuf | 1 - 7 files changed, 196 insertions(+), 192 deletions(-) create mode 100644 durabletask/internal/PROTO_SOURCE_COMMIT_HASH delete mode 160000 submodules/durabletask-protobuf diff --git a/CHANGELOG.md b/CHANGELOG.md index 286312c..ee736f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries ([#36](https://github.com/microsoft/durabletask-python/pull/36)) - by [@berndverst](https://github.com/berndverst) - Http and grpc protocols and their secure variants are stripped from the host name parameter if provided. 
Secure mode is enabled if the protocol provided is https or grpcs ([#38](https://github.com/microsoft/durabletask-python/pull/38) - by [@berndverst)(https://github.com/berndverst) +- Improve ProtoGen by downloading proto file directly instead of using submodule ([#39](https://github.com/microsoft/durabletask-python/pull/39) - by [@berndverst](https://github.com/berndverst) ### Updates diff --git a/Makefile b/Makefile index 68a9b89..5a05f33 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,8 @@ install: python3 -m pip install . gen-proto: - cp ./submodules/durabletask-protobuf/protos/orchestrator_service.proto durabletask/internal/orchestrator_service.proto + curl -o durabletask/internal/orchestrator_service.proto https://raw.githubusercontent.com/microsoft/durabletask-protobuf/refs/heads/main/protos/orchestrator_service.proto + curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/microsoft/durabletask-protobuf/commits?path=protos/orchestrator_service.proto&sha=main&per_page=1" | jq -r '.[0].sha' >> durabletask/internal/PROTO_SOURCE_COMMIT_HASH python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. ./durabletask/internal/orchestrator_service.proto rm durabletask/internal/*.proto diff --git a/README.md b/README.md index 420d75f..644635e 100644 --- a/README.md +++ b/README.md @@ -161,19 +161,13 @@ The following is more information about how to develop this project. Note that d ### Generating protobufs -Protobuf definitions are stored in the [./submodules/durabletask-proto](./submodules/durabletask-proto) directory, which is a submodule. 
To update the submodule, run the following command from the project root: - -```sh -git submodule update --init -``` - -Once the submodule is available, the corresponding source code can be regenerated using the following command from the project root: - ```sh pip3 install -r dev-requirements.txt make gen-proto ``` +This will download the `orchestrator_service.proto` from the `microsoft/durabletask-protobuf` repo and compile it using `grpcio-tools`. The version of the source proto file that was downloaded can be found in the file `durabletask/internal/PROTO_SOURCE_COMMIT_HASH`. + ### Running unit tests Unit tests can be run using the following command from the project root. Unit tests _don't_ require a sidecar process to be running. diff --git a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH new file mode 100644 index 0000000..ddbd31a --- /dev/null +++ b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH @@ -0,0 +1 @@ +443b333f4f65a438dc9eb4f090560d232afec4b7 diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py index 9c92eac..44b4a32 100644 --- a/durabletask/internal/orchestrator_service_pb2.py +++ b/durabletask/internal/orchestrator_service_pb2.py @@ -18,7 +18,7 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 
\x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 
\x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 
\x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 
\x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 
\x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 
\x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 
\n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43lean
EntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 
\x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 
\x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 
\x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x9d\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 
\x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 
\x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 
\x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"j\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 
\n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.Delet
eTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -30,184 +30,184 @@ _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._options = None _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=12097 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=12406 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12408 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12473 + _globals['_ORCHESTRATIONSTATUS']._serialized_start=12232 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=12541 + _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12543 + _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12608 _globals['_ORCHESTRATIONINSTANCE']._serialized_start=177 _globals['_ORCHESTRATIONINSTANCE']._serialized_end=271 _globals['_ACTIVITYREQUEST']._serialized_start=274 _globals['_ACTIVITYREQUEST']._serialized_end=511 _globals['_ACTIVITYRESPONSE']._serialized_start=514 - _globals['_ACTIVITYRESPONSE']._serialized_end=659 - _globals['_TASKFAILUREDETAILS']._serialized_start=662 - _globals['_TASKFAILUREDETAILS']._serialized_end=840 - _globals['_PARENTINSTANCEINFO']._serialized_start=843 - _globals['_PARENTINSTANCEINFO']._serialized_end=1034 - _globals['_TRACECONTEXT']._serialized_start=1036 - _globals['_TRACECONTEXT']._serialized_end=1141 - 
_globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1144 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1536 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1539 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1706 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1708 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1796 - _globals['_TASKSCHEDULEDEVENT']._serialized_start=1799 - _globals['_TASKSCHEDULEDEVENT']._serialized_end=1968 - _globals['_TASKCOMPLETEDEVENT']._serialized_start=1970 - _globals['_TASKCOMPLETEDEVENT']._serialized_end=2061 - _globals['_TASKFAILEDEVENT']._serialized_start=2063 - _globals['_TASKFAILEDEVENT']._serialized_end=2150 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2153 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2360 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2362 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2473 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2475 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2582 - _globals['_TIMERCREATEDEVENT']._serialized_start=2584 - _globals['_TIMERCREATEDEVENT']._serialized_end=2647 - _globals['_TIMERFIREDEVENT']._serialized_start=2649 - _globals['_TIMERFIREDEVENT']._serialized_end=2727 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2729 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2755 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2757 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2785 - _globals['_EVENTSENTEVENT']._serialized_start=2787 - _globals['_EVENTSENTEVENT']._serialized_end=2882 - _globals['_EVENTRAISEDEVENT']._serialized_start=2884 - _globals['_EVENTRAISEDEVENT']._serialized_end=2961 - _globals['_GENERICEVENT']._serialized_start=2963 - _globals['_GENERICEVENT']._serialized_end=3021 - _globals['_HISTORYSTATEEVENT']._serialized_start=3023 - 
_globals['_HISTORYSTATEEVENT']._serialized_end=3091 - _globals['_CONTINUEASNEWEVENT']._serialized_start=3093 - _globals['_CONTINUEASNEWEVENT']._serialized_end=3158 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3160 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3230 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3232 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3300 - _globals['_HISTORYEVENT']._serialized_start=3303 - _globals['_HISTORYEVENT']._serialized_end=4461 - _globals['_SCHEDULETASKACTION']._serialized_start=4463 - _globals['_SCHEDULETASKACTION']._serialized_end=4589 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4592 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4748 - _globals['_CREATETIMERACTION']._serialized_start=4750 - _globals['_CREATETIMERACTION']._serialized_end=4813 - _globals['_SENDEVENTACTION']._serialized_start=4815 - _globals['_SENDEVENTACTION']._serialized_end=4932 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4935 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5243 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5245 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5358 - _globals['_ORCHESTRATORACTION']._serialized_start=5361 - _globals['_ORCHESTRATORACTION']._serialized_end=5739 - _globals['_ORCHESTRATORREQUEST']._serialized_start=5742 - _globals['_ORCHESTRATORREQUEST']._serialized_end=5960 - _globals['_ORCHESTRATORRESPONSE']._serialized_start=5963 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=6095 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=6098 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=6517 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6474 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6517 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6519 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6638 - 
_globals['_CREATEINSTANCERESPONSE']._serialized_start=6640 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=6684 - _globals['_GETINSTANCEREQUEST']._serialized_start=6686 - _globals['_GETINSTANCEREQUEST']._serialized_end=6755 - _globals['_GETINSTANCERESPONSE']._serialized_start=6757 - _globals['_GETINSTANCERESPONSE']._serialized_end=6843 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=6845 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=6934 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=6936 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=6960 - _globals['_ORCHESTRATIONSTATE']._serialized_start=6963 - _globals['_ORCHESTRATIONSTATE']._serialized_end=7639 - _globals['_RAISEEVENTREQUEST']._serialized_start=7641 - _globals['_RAISEEVENTREQUEST']._serialized_end=7739 - _globals['_RAISEEVENTRESPONSE']._serialized_start=7741 - _globals['_RAISEEVENTRESPONSE']._serialized_end=7761 - _globals['_TERMINATEREQUEST']._serialized_start=7763 - _globals['_TERMINATEREQUEST']._serialized_end=7866 - _globals['_TERMINATERESPONSE']._serialized_start=7868 - _globals['_TERMINATERESPONSE']._serialized_end=7887 - _globals['_SUSPENDREQUEST']._serialized_start=7889 - _globals['_SUSPENDREQUEST']._serialized_end=7971 - _globals['_SUSPENDRESPONSE']._serialized_start=7973 - _globals['_SUSPENDRESPONSE']._serialized_end=7990 - _globals['_RESUMEREQUEST']._serialized_start=7992 - _globals['_RESUMEREQUEST']._serialized_end=8073 - _globals['_RESUMERESPONSE']._serialized_start=8075 - _globals['_RESUMERESPONSE']._serialized_end=8091 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=8093 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=8147 - _globals['_INSTANCEQUERY']._serialized_start=8150 - _globals['_INSTANCEQUERY']._serialized_end=8536 - _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8539 - _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8669 - _globals['_PURGEINSTANCESREQUEST']._serialized_start=8672 - 
_globals['_PURGEINSTANCESREQUEST']._serialized_end=8800 - _globals['_PURGEINSTANCEFILTER']._serialized_start=8803 - _globals['_PURGEINSTANCEFILTER']._serialized_end=8973 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8975 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9029 - _globals['_CREATETASKHUBREQUEST']._serialized_start=9031 - _globals['_CREATETASKHUBREQUEST']._serialized_end=9079 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=9081 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=9104 - _globals['_DELETETASKHUBREQUEST']._serialized_start=9106 - _globals['_DELETETASKHUBREQUEST']._serialized_end=9128 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=9130 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=9153 - _globals['_SIGNALENTITYREQUEST']._serialized_start=9156 - _globals['_SIGNALENTITYREQUEST']._serialized_end=9326 - _globals['_SIGNALENTITYRESPONSE']._serialized_start=9328 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=9350 - _globals['_GETENTITYREQUEST']._serialized_start=9352 - _globals['_GETENTITYREQUEST']._serialized_end=9412 - _globals['_GETENTITYRESPONSE']._serialized_start=9414 - _globals['_GETENTITYRESPONSE']._serialized_end=9482 - _globals['_ENTITYQUERY']._serialized_start=9485 - _globals['_ENTITYQUERY']._serialized_end=9816 - _globals['_QUERYENTITIESREQUEST']._serialized_start=9818 - _globals['_QUERYENTITIESREQUEST']._serialized_end=9869 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=9871 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=9986 - _globals['_ENTITYMETADATA']._serialized_start=9989 - _globals['_ENTITYMETADATA']._serialized_end=10208 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10211 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10354 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10357 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10503 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10505 - 
_globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10598 - _globals['_ENTITYBATCHREQUEST']._serialized_start=10601 - _globals['_ENTITYBATCHREQUEST']._serialized_end=10731 - _globals['_ENTITYBATCHRESULT']._serialized_start=10734 - _globals['_ENTITYBATCHRESULT']._serialized_end=10919 - _globals['_OPERATIONREQUEST']._serialized_start=10921 - _globals['_OPERATIONREQUEST']._serialized_end=11022 - _globals['_OPERATIONRESULT']._serialized_start=11024 - _globals['_OPERATIONRESULT']._serialized_end=11143 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11145 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11215 - _globals['_OPERATIONRESULTFAILURE']._serialized_start=11217 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=11286 - _globals['_OPERATIONACTION']._serialized_start=11289 - _globals['_OPERATIONACTION']._serialized_end=11445 - _globals['_SENDSIGNALACTION']._serialized_start=11448 - _globals['_SENDSIGNALACTION']._serialized_end=11596 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11599 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11805 - _globals['_GETWORKITEMSREQUEST']._serialized_start=11807 - _globals['_GETWORKITEMSREQUEST']._serialized_end=11828 - _globals['_WORKITEM']._serialized_start=11831 - _globals['_WORKITEM']._serialized_end=12056 - _globals['_COMPLETETASKRESPONSE']._serialized_start=12058 - _globals['_COMPLETETASKRESPONSE']._serialized_end=12080 - _globals['_HEALTHPING']._serialized_start=12082 - _globals['_HEALTHPING']._serialized_end=12094 - _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12476 - _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13880 + _globals['_ACTIVITYRESPONSE']._serialized_end=684 + _globals['_TASKFAILUREDETAILS']._serialized_start=687 + _globals['_TASKFAILUREDETAILS']._serialized_end=865 + _globals['_PARENTINSTANCEINFO']._serialized_start=868 + _globals['_PARENTINSTANCEINFO']._serialized_end=1059 + _globals['_TRACECONTEXT']._serialized_start=1061 + 
_globals['_TRACECONTEXT']._serialized_end=1166 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1169 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1561 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1564 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1731 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1733 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1821 + _globals['_TASKSCHEDULEDEVENT']._serialized_start=1824 + _globals['_TASKSCHEDULEDEVENT']._serialized_end=1993 + _globals['_TASKCOMPLETEDEVENT']._serialized_start=1995 + _globals['_TASKCOMPLETEDEVENT']._serialized_end=2086 + _globals['_TASKFAILEDEVENT']._serialized_start=2088 + _globals['_TASKFAILEDEVENT']._serialized_end=2175 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2178 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2385 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2387 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2498 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2500 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2607 + _globals['_TIMERCREATEDEVENT']._serialized_start=2609 + _globals['_TIMERCREATEDEVENT']._serialized_end=2672 + _globals['_TIMERFIREDEVENT']._serialized_start=2674 + _globals['_TIMERFIREDEVENT']._serialized_end=2752 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2754 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2780 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2782 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2810 + _globals['_EVENTSENTEVENT']._serialized_start=2812 + _globals['_EVENTSENTEVENT']._serialized_end=2907 + _globals['_EVENTRAISEDEVENT']._serialized_start=2909 + _globals['_EVENTRAISEDEVENT']._serialized_end=2986 + _globals['_GENERICEVENT']._serialized_start=2988 + _globals['_GENERICEVENT']._serialized_end=3046 + 
_globals['_HISTORYSTATEEVENT']._serialized_start=3048 + _globals['_HISTORYSTATEEVENT']._serialized_end=3116 + _globals['_CONTINUEASNEWEVENT']._serialized_start=3118 + _globals['_CONTINUEASNEWEVENT']._serialized_end=3183 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3185 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3255 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3257 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3325 + _globals['_HISTORYEVENT']._serialized_start=3328 + _globals['_HISTORYEVENT']._serialized_end=4486 + _globals['_SCHEDULETASKACTION']._serialized_start=4488 + _globals['_SCHEDULETASKACTION']._serialized_end=4614 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4617 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4773 + _globals['_CREATETIMERACTION']._serialized_start=4775 + _globals['_CREATETIMERACTION']._serialized_end=4838 + _globals['_SENDEVENTACTION']._serialized_start=4840 + _globals['_SENDEVENTACTION']._serialized_end=4957 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4960 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5268 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5270 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5383 + _globals['_ORCHESTRATORACTION']._serialized_start=5386 + _globals['_ORCHESTRATORACTION']._serialized_end=5764 + _globals['_ORCHESTRATORREQUEST']._serialized_start=5767 + _globals['_ORCHESTRATORREQUEST']._serialized_end=5985 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=5988 + _globals['_ORCHESTRATORRESPONSE']._serialized_end=6145 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=6148 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=6567 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6524 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6567 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6569 + 
_globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6688 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=6690 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=6734 + _globals['_GETINSTANCEREQUEST']._serialized_start=6736 + _globals['_GETINSTANCEREQUEST']._serialized_end=6805 + _globals['_GETINSTANCERESPONSE']._serialized_start=6807 + _globals['_GETINSTANCERESPONSE']._serialized_end=6893 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=6895 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=6984 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=6986 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=7010 + _globals['_ORCHESTRATIONSTATE']._serialized_start=7013 + _globals['_ORCHESTRATIONSTATE']._serialized_end=7689 + _globals['_RAISEEVENTREQUEST']._serialized_start=7691 + _globals['_RAISEEVENTREQUEST']._serialized_end=7789 + _globals['_RAISEEVENTRESPONSE']._serialized_start=7791 + _globals['_RAISEEVENTRESPONSE']._serialized_end=7811 + _globals['_TERMINATEREQUEST']._serialized_start=7813 + _globals['_TERMINATEREQUEST']._serialized_end=7916 + _globals['_TERMINATERESPONSE']._serialized_start=7918 + _globals['_TERMINATERESPONSE']._serialized_end=7937 + _globals['_SUSPENDREQUEST']._serialized_start=7939 + _globals['_SUSPENDREQUEST']._serialized_end=8021 + _globals['_SUSPENDRESPONSE']._serialized_start=8023 + _globals['_SUSPENDRESPONSE']._serialized_end=8040 + _globals['_RESUMEREQUEST']._serialized_start=8042 + _globals['_RESUMEREQUEST']._serialized_end=8123 + _globals['_RESUMERESPONSE']._serialized_start=8125 + _globals['_RESUMERESPONSE']._serialized_end=8141 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=8143 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=8197 + _globals['_INSTANCEQUERY']._serialized_start=8200 + _globals['_INSTANCEQUERY']._serialized_end=8586 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8589 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8719 + 
_globals['_PURGEINSTANCESREQUEST']._serialized_start=8722 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=8850 + _globals['_PURGEINSTANCEFILTER']._serialized_start=8853 + _globals['_PURGEINSTANCEFILTER']._serialized_end=9023 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=9025 + _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9079 + _globals['_CREATETASKHUBREQUEST']._serialized_start=9081 + _globals['_CREATETASKHUBREQUEST']._serialized_end=9129 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=9131 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=9154 + _globals['_DELETETASKHUBREQUEST']._serialized_start=9156 + _globals['_DELETETASKHUBREQUEST']._serialized_end=9178 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=9180 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=9203 + _globals['_SIGNALENTITYREQUEST']._serialized_start=9206 + _globals['_SIGNALENTITYREQUEST']._serialized_end=9376 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=9378 + _globals['_SIGNALENTITYRESPONSE']._serialized_end=9400 + _globals['_GETENTITYREQUEST']._serialized_start=9402 + _globals['_GETENTITYREQUEST']._serialized_end=9462 + _globals['_GETENTITYRESPONSE']._serialized_start=9464 + _globals['_GETENTITYRESPONSE']._serialized_end=9532 + _globals['_ENTITYQUERY']._serialized_start=9535 + _globals['_ENTITYQUERY']._serialized_end=9866 + _globals['_QUERYENTITIESREQUEST']._serialized_start=9868 + _globals['_QUERYENTITIESREQUEST']._serialized_end=9919 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=9921 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=10036 + _globals['_ENTITYMETADATA']._serialized_start=10039 + _globals['_ENTITYMETADATA']._serialized_end=10258 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10261 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10404 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10407 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10553 + 
_globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10555 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10648 + _globals['_ENTITYBATCHREQUEST']._serialized_start=10651 + _globals['_ENTITYBATCHREQUEST']._serialized_end=10781 + _globals['_ENTITYBATCHRESULT']._serialized_start=10784 + _globals['_ENTITYBATCHRESULT']._serialized_end=10969 + _globals['_OPERATIONREQUEST']._serialized_start=10971 + _globals['_OPERATIONREQUEST']._serialized_end=11072 + _globals['_OPERATIONRESULT']._serialized_start=11074 + _globals['_OPERATIONRESULT']._serialized_end=11193 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11195 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11265 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=11267 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=11336 + _globals['_OPERATIONACTION']._serialized_start=11339 + _globals['_OPERATIONACTION']._serialized_end=11495 + _globals['_SENDSIGNALACTION']._serialized_start=11498 + _globals['_SENDSIGNALACTION']._serialized_end=11646 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11649 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11855 + _globals['_GETWORKITEMSREQUEST']._serialized_start=11857 + _globals['_GETWORKITEMSREQUEST']._serialized_end=11963 + _globals['_WORKITEM']._serialized_start=11966 + _globals['_WORKITEM']._serialized_end=12191 + _globals['_COMPLETETASKRESPONSE']._serialized_start=12193 + _globals['_COMPLETETASKRESPONSE']._serialized_end=12215 + _globals['_HEALTHPING']._serialized_start=12217 + _globals['_HEALTHPING']._serialized_end=12229 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12611 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=14015 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2.pyi b/durabletask/internal/orchestrator_service_pb2.pyi index 82d2e1a..84d2af8 100644 --- a/durabletask/internal/orchestrator_service_pb2.pyi +++ 
b/durabletask/internal/orchestrator_service_pb2.pyi @@ -63,16 +63,18 @@ class ActivityRequest(_message.Message): def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., taskId: _Optional[int] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... class ActivityResponse(_message.Message): - __slots__ = ("instanceId", "taskId", "result", "failureDetails") + __slots__ = ("instanceId", "taskId", "result", "failureDetails", "completionToken") INSTANCEID_FIELD_NUMBER: _ClassVar[int] TASKID_FIELD_NUMBER: _ClassVar[int] RESULT_FIELD_NUMBER: _ClassVar[int] FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] instanceId: str taskId: int result: _wrappers_pb2.StringValue failureDetails: TaskFailureDetails - def __init__(self, instanceId: _Optional[str] = ..., taskId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... + completionToken: str + def __init__(self, instanceId: _Optional[str] = ..., taskId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... 
class TaskFailureDetails(_message.Message): __slots__ = ("errorType", "errorMessage", "stackTrace", "innerFailure", "isNonRetriable") @@ -421,14 +423,16 @@ class OrchestratorRequest(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ...) -> None: ... class OrchestratorResponse(_message.Message): - __slots__ = ("instanceId", "actions", "customStatus") + __slots__ = ("instanceId", "actions", "customStatus", "completionToken") INSTANCEID_FIELD_NUMBER: _ClassVar[int] ACTIONS_FIELD_NUMBER: _ClassVar[int] CUSTOMSTATUS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] instanceId: str actions: _containers.RepeatedCompositeFieldContainer[OrchestratorAction] customStatus: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + completionToken: str + def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... 
class CreateInstanceRequest(_message.Message): __slots__ = ("instanceId", "name", "version", "input", "scheduledStartTimestamp", "orchestrationIdReusePolicy", "executionId", "tags") @@ -856,8 +860,12 @@ class StartNewOrchestrationAction(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... class GetWorkItemsRequest(_message.Message): - __slots__ = () - def __init__(self) -> None: ... + __slots__ = ("maxConcurrentOrchestrationWorkItems", "maxConcurrentActivityWorkItems") + MAXCONCURRENTORCHESTRATIONWORKITEMS_FIELD_NUMBER: _ClassVar[int] + MAXCONCURRENTACTIVITYWORKITEMS_FIELD_NUMBER: _ClassVar[int] + maxConcurrentOrchestrationWorkItems: int + maxConcurrentActivityWorkItems: int + def __init__(self, maxConcurrentOrchestrationWorkItems: _Optional[int] = ..., maxConcurrentActivityWorkItems: _Optional[int] = ...) -> None: ... 
class WorkItem(_message.Message): __slots__ = ("orchestratorRequest", "activityRequest", "entityRequest", "healthPing", "completionToken") diff --git a/submodules/durabletask-protobuf b/submodules/durabletask-protobuf deleted file mode 160000 index c7d8cd8..0000000 --- a/submodules/durabletask-protobuf +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c7d8cd898017342d090ba9531c3f2ec45b8e07e7 From 37544cf157adcb11726f5d0b5319e7c6f57c566f Mon Sep 17 00:00:00 2001 From: wangbill Date: Thu, 23 Jan 2025 11:57:56 -0800 Subject: [PATCH 17/81] remove gitmodule file (#41) Signed-off-by: Albert Callarisa --- .gitmodules | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 .gitmodules diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index b371516..0000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "submodules/durabletask-protobuf"] - path = submodules/durabletask-protobuf - url = https://github.com/microsoft/durabletask-protobuf From 2bdf87f7b0e0218df55873e4e2bb9284c0d40138 Mon Sep 17 00:00:00 2001 From: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> Date: Tue, 18 Feb 2025 15:46:37 -0700 Subject: [PATCH 18/81] Creation of DTS example and passing of completionToken (#40) * Creation of DTS example and passing of completionToken Signed-off-by: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> * Adressing review feedback Signed-off-by: Ryan Lettieri * Reverting dapr readme Signed-off-by: Ryan Lettieri * Adding accessTokenManager class for refreshing credential token Signed-off-by: Ryan Lettieri * Adding comments to the example Signed-off-by: Ryan Lettieri * Adding in requirement for azure-identity Signed-off-by: Ryan Lettieri * Moving dts logic into its own module Signed-off-by: Ryan Lettieri * Fixing whitesapce Signed-off-by: Ryan Lettieri * Updating dts client to refresh token Signed-off-by: Ryan Lettieri * Cleaning up construction of dts objects and improving examples Signed-off-by: Ryan Lettieri * Migrating shared 
access token logic to new grpc class Signed-off-by: Ryan Lettieri * Adding log statements to access_token_manager Signed-off-by: Ryan Lettieri * breaking for loop when setting interceptors Signed-off-by: Ryan Lettieri * Removing changes to client.py and adding additional steps to readme.md Signed-off-by: Ryan Lettieri * Refactoring client and worker to pass around interceptors Signed-off-by: Ryan Lettieri * Fixing import for DefaultClientInterceptorImpl Signed-off-by: Ryan Lettieri * Adressing round 1 of feedback Signed-off-by: Ryan Lettieri * Fixing interceptor issue Signed-off-by: Ryan Lettieri * Moving some files around to remove dependencies Signed-off-by: Ryan Lettieri * Adressing more feedback Signed-off-by: Ryan Lettieri * More review feedback Signed-off-by: Ryan Lettieri * Passing token credential as an argument rather than 2 strings Signed-off-by: Ryan Lettieri * More review feedback for token passing Signed-off-by: Ryan Lettieri * Addressing None comment and using correct metadata Signed-off-by: Ryan Lettieri * Updating unit tests Signed-off-by: Ryan Lettieri * Fixing the type for the unit test Signed-off-by: Ryan Lettieri * Fixing grpc calls Signed-off-by: Ryan Lettieri * Fix linter errors and update documentation * Specifying version reqiuirement for pyproject.toml Signed-off-by: Ryan Lettieri * Updating README Signed-off-by: Ryan Lettieri * Adding comment for credential type Signed-off-by: Ryan Lettieri --------- Signed-off-by: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> Signed-off-by: Ryan Lettieri Co-authored-by: Chris Gillum Signed-off-by: Albert Callarisa --- CHANGELOG.md | 1 + README.md | 7 +- durabletask-azuremanaged/__init__.py | 0 .../durabletask/azuremanaged/__init__.py | 0 .../durabletask/azuremanaged/client.py | 30 ++++++ .../internal/access_token_manager.py | 49 ++++++++++ .../internal/durabletask_grpc_interceptor.py | 41 ++++++++ .../durabletask/azuremanaged/worker.py | 30 ++++++ durabletask-azuremanaged/pyproject.toml | 
41 ++++++++ durabletask/client.py | 26 ++++- durabletask/internal/grpc_interceptor.py | 12 +-- durabletask/internal/shared.py | 22 +++-- durabletask/task.py | 7 +- durabletask/worker.py | 42 +++++--- examples/README.md | 2 +- examples/dts/README.md | 55 +++++++++++ examples/dts/dts_activity_sequence.py | 71 ++++++++++++++ examples/dts/dts_fanout_fanin.py | 96 +++++++++++++++++++ requirements.txt | 2 + tests/test_client.py | 34 +++---- 20 files changed, 514 insertions(+), 54 deletions(-) create mode 100644 durabletask-azuremanaged/__init__.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/__init__.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/client.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/internal/access_token_manager.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/worker.py create mode 100644 durabletask-azuremanaged/pyproject.toml create mode 100644 examples/dts/README.md create mode 100644 examples/dts/dts_activity_sequence.py create mode 100644 examples/dts/dts_fanout_fanin.py diff --git a/CHANGELOG.md b/CHANGELOG.md index ee736f0..13b0e69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) +- Added new `durabletask-azuremanaged` package for use with the [Durable Task Scheduler](https://techcommunity.microsoft.com/blog/appsonazureblog/announcing-limited-early-access-of-the-durable-task-scheduler-for-azure-durable-/4286526) - by 
[@RyanLettieri](https://github.com/RyanLettieri) ### Changes diff --git a/README.md b/README.md index 644635e..87af41d 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,14 @@ -# Durable Task Client SDK for Python +# Durable Task SDK for Python [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Build Validation](https://github.com/microsoft/durabletask-python/actions/workflows/pr-validation.yml/badge.svg)](https://github.com/microsoft/durabletask-python/actions/workflows/pr-validation.yml) [![PyPI version](https://badge.fury.io/py/durabletask.svg)](https://badge.fury.io/py/durabletask) -This repo contains a Python client SDK for use with the [Durable Task Framework for Go](https://github.com/microsoft/durabletask-go) and [Dapr Workflow](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/). With this SDK, you can define, schedule, and manage durable orchestrations using ordinary Python code. +This repo contains a Python SDK for use with the [Azure Durable Task Scheduler](https://techcommunity.microsoft.com/blog/appsonazureblog/announcing-limited-early-access-of-the-durable-task-scheduler-for-azure-durable-/4286526) and the [Durable Task Framework for Go](https://github.com/microsoft/durabletask-go). With this SDK, you can define, schedule, and manage durable orchestrations using ordinary Python code. ⚠️ **This SDK is currently under active development and is not yet ready for production use.** ⚠️ -> Note that this project is **not** currently affiliated with the [Durable Functions](https://docs.microsoft.com/azure/azure-functions/durable/durable-functions-overview) project for Azure Functions. If you are looking for a Python SDK for Durable Functions, please see [this repo](https://github.com/Azure/azure-functions-durable-python). 
- +> Note that this SDK is **not** currently compatible with [Azure Durable Functions](https://docs.microsoft.com/azure/azure-functions/durable/durable-functions-overview). If you are looking for a Python SDK for Azure Durable Functions, please see [this repo](https://github.com/Azure/azure-functions-durable-python). ## Supported patterns diff --git a/durabletask-azuremanaged/__init__.py b/durabletask-azuremanaged/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/__init__.py b/durabletask-azuremanaged/durabletask/azuremanaged/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/client.py b/durabletask-azuremanaged/durabletask/azuremanaged/client.py new file mode 100644 index 0000000..f641eae --- /dev/null +++ b/durabletask-azuremanaged/durabletask/azuremanaged/client.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ + DTSDefaultClientInterceptorImpl +from durabletask.client import TaskHubGrpcClient + + +# Client class used for Durable Task Scheduler (DTS) +class DurableTaskSchedulerClient(TaskHubGrpcClient): + def __init__(self, *, + host_address: str, + taskhub: str, + token_credential: TokenCredential, + secure_channel: bool = True): + + if not taskhub: + raise ValueError("Taskhub value cannot be empty. 
Please provide a value for your taskhub") + + interceptors = [DTSDefaultClientInterceptorImpl(token_credential, taskhub)] + + # We pass in None for the metadata so we don't construct an additional interceptor in the parent class + # Since the parent class doesn't use the metadata for anything else, we can set it as None + super().__init__( + host_address=host_address, + secure_channel=secure_channel, + metadata=None, + interceptors=interceptors) diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/access_token_manager.py b/durabletask-azuremanaged/durabletask/azuremanaged/internal/access_token_manager.py new file mode 100644 index 0000000..f0e7a42 --- /dev/null +++ b/durabletask-azuremanaged/durabletask/azuremanaged/internal/access_token_manager.py @@ -0,0 +1,49 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +from datetime import datetime, timedelta, timezone +from typing import Optional + +from azure.core.credentials import AccessToken, TokenCredential + +import durabletask.internal.shared as shared + + +# By default, when there are 10 minutes left before the token expires, refresh the token +class AccessTokenManager: + + _token: Optional[AccessToken] + + def __init__(self, token_credential: Optional[TokenCredential], refresh_interval_seconds: int = 600): + self._scope = "https://durabletask.io/.default" + self._refresh_interval_seconds = refresh_interval_seconds + self._logger = shared.get_logger("token_manager") + + self._credential = token_credential + + if self._credential is not None: + self._token = self._credential.get_token(self._scope) + self.expiry_time = datetime.fromtimestamp(self._token.expires_on, tz=timezone.utc) + else: + self._token = None + self.expiry_time = None + + def get_access_token(self) -> Optional[AccessToken]: + if self._token is None or self.is_token_expired(): + self.refresh_token() + return self._token + + # Checks if the token is expired, or if it will expire in the next 
"refresh_interval_seconds" seconds. + # For example, if the token is created to have a lifespan of 2 hours, and the refresh buffer is set to 30 minutes, + # We will grab a new token when there are 30 minutes left on the lifespan of the token + def is_token_expired(self) -> bool: + if self.expiry_time is None: + return True + return datetime.now(timezone.utc) >= (self.expiry_time - timedelta(seconds=self._refresh_interval_seconds)) + + def refresh_token(self): + if self._credential is not None: + self._token = self._credential.get_token(self._scope) + + # Convert UNIX timestamp to timezone-aware datetime + self.expiry_time = datetime.fromtimestamp(self._token.expires_on, tz=timezone.utc) + self._logger.debug(f"Token refreshed. Expires at: {self.expiry_time}") diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py new file mode 100644 index 0000000..a23cac9 --- /dev/null +++ b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +import grpc +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.access_token_manager import \ + AccessTokenManager +from durabletask.internal.grpc_interceptor import ( + DefaultClientInterceptorImpl, _ClientCallDetails) + + +class DTSDefaultClientInterceptorImpl (DefaultClientInterceptorImpl): + """The class implements a UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, + StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an + interceptor to add additional headers to all calls as needed.""" + + def __init__(self, token_credential: TokenCredential, taskhub_name: str): + self._metadata = [("taskhub", taskhub_name)] + super().__init__(self._metadata) + + if token_credential is not None: + self._token_credential = token_credential + self._token_manager = AccessTokenManager(token_credential=self._token_credential) + access_token = self._token_manager.get_access_token() + if access_token is not None: + self._metadata.append(("authorization", f"Bearer {access_token.token}")) + + def _intercept_call( + self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: + """Internal intercept_call implementation which adds metadata to grpc metadata in the RPC + call details.""" + # Refresh the auth token if it is present and needed + if self._metadata is not None: + for i, (key, _) in enumerate(self._metadata): + if key.lower() == "authorization": # Ensure case-insensitive comparison + new_token = self._token_manager.get_access_token() # Get the new token + if new_token is not None: + self._metadata[i] = ("authorization", f"Bearer {new_token.token}") # Update the token + + return super()._intercept_call(client_call_details) diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py new file mode 100644 index 0000000..d10c2f7 --- /dev/null +++ b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py @@ -0,0 
+1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ + DTSDefaultClientInterceptorImpl +from durabletask.worker import TaskHubGrpcWorker + + +# Worker class used for Durable Task Scheduler (DTS) +class DurableTaskSchedulerWorker(TaskHubGrpcWorker): + def __init__(self, *, + host_address: str, + taskhub: str, + token_credential: TokenCredential, + secure_channel: bool = True): + + if not taskhub: + raise ValueError("The taskhub value cannot be empty.") + + interceptors = [DTSDefaultClientInterceptorImpl(token_credential, taskhub)] + + # We pass in None for the metadata so we don't construct an additional interceptor in the parent class + # Since the parent class doesn't use anything metadata for anything else, we can set it as None + super().__init__( + host_address=host_address, + secure_channel=secure_channel, + metadata=None, + interceptors=interceptors) diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml new file mode 100644 index 0000000..ac6be6f --- /dev/null +++ b/durabletask-azuremanaged/pyproject.toml @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +# For more information on pyproject.toml, see https://peps.python.org/pep-0621/ + +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "durabletask.azuremanaged" +version = "0.1b1" +description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" +keywords = [ + "durable", + "task", + "workflow", + "azure" +] +classifiers = [ + "Development Status :: 3 - Alpha", + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", +] +requires-python = ">=3.9" +license = {file = "LICENSE"} +readme = "README.md" +dependencies = [ + "durabletask>=0.2.0", + "azure-identity>=1.19.0" +] + +[project.urls] +repository = "https://github.com/microsoft/durabletask-python" +changelog = "https://github.com/microsoft/durabletask-python/blob/main/CHANGELOG.md" + +[tool.setuptools.packages.find] +include = ["durabletask.azuremanaged", "durabletask.azuremanaged.*"] + +[tool.pytest.ini_options] +minversion = "6.0" diff --git a/durabletask/client.py b/durabletask/client.py index 31953ae..60e194f 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from datetime import datetime from enum import Enum -from typing import Any, Optional, TypeVar, Union +from typing import Any, Optional, Sequence, TypeVar, Union import grpc from google.protobuf import wrappers_pb2 @@ -16,6 +16,7 @@ import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared from durabletask import task +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl TInput = TypeVar('TInput') TOutput = TypeVar('TOutput') @@ -96,8 +97,25 @@ def __init__(self, *, metadata: Optional[list[tuple[str, str]]] = None, log_handler: Optional[logging.Handler] = None, log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False): - channel = 
shared.get_grpc_channel(host_address, metadata, secure_channel=secure_channel) + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None): + + # If the caller provided metadata, we need to create a new interceptor for it and + # add it to the list of interceptors. + if interceptors is not None: + interceptors = list(interceptors) + if metadata is not None: + interceptors.append(DefaultClientInterceptorImpl(metadata)) + elif metadata is not None: + interceptors = [DefaultClientInterceptorImpl(metadata)] + else: + interceptors = None + + channel = shared.get_grpc_channel( + host_address=host_address, + secure_channel=secure_channel, + interceptors=interceptors + ) self._stub = stubs.TaskHubSidecarServiceStub(channel) self._logger = shared.get_logger("client", log_handler, log_formatter) @@ -116,7 +134,7 @@ def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInpu scheduledStartTimestamp=helpers.new_timestamp(start_at) if start_at else None, version=wrappers_pb2.StringValue(value=""), orchestrationIdReusePolicy=reuse_id_policy, - ) + ) self._logger.info(f"Starting new '{name}' instance with ID = '{req.instanceId}'.") res: pb.CreateInstanceResponse = self._stub.StartInstance(req) diff --git a/durabletask/internal/grpc_interceptor.py b/durabletask/internal/grpc_interceptor.py index 738fca9..69db3c5 100644 --- a/durabletask/internal/grpc_interceptor.py +++ b/durabletask/internal/grpc_interceptor.py @@ -19,10 +19,10 @@ class _ClientCallDetails( class DefaultClientInterceptorImpl ( - grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor, - grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor): + grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor): """The class implements a UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, - StreamUnaryClientInterceptor and 
StreamStreamClientInterceptor from grpc to add an + StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an interceptor to add additional headers to all calls as needed.""" def __init__(self, metadata: list[tuple[str, str]]): @@ -30,17 +30,17 @@ def __init__(self, metadata: list[tuple[str, str]]): self._metadata = metadata def _intercept_call( - self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: + self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: """Internal intercept_call implementation which adds metadata to grpc metadata in the RPC call details.""" if self._metadata is None: return client_call_details - + if client_call_details.metadata is not None: metadata = list(client_call_details.metadata) else: metadata = [] - + metadata.extend(self._metadata) client_call_details = _ClientCallDetails( client_call_details.method, client_call_details.timeout, metadata, diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index c4f3aa4..1872ad4 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -5,11 +5,16 @@ import json import logging from types import SimpleNamespace -from typing import Any, Optional +from typing import Any, Optional, Sequence, Union import grpc -from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl +ClientInterceptor = Union[ + grpc.UnaryUnaryClientInterceptor, + grpc.UnaryStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, + grpc.StreamStreamClientInterceptor +] # Field name used to indicate that an object was automatically serialized # and should be deserialized as a SimpleNamespace @@ -25,8 +30,9 @@ def get_default_host_address() -> str: def get_grpc_channel( host_address: Optional[str], - metadata: Optional[list[tuple[str, str]]], - secure_channel: bool = False) -> grpc.Channel: + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None) -> grpc.Channel: + 
if host_address is None: host_address = get_default_host_address() @@ -44,16 +50,18 @@ def get_grpc_channel( host_address = host_address[len(protocol):] break + # Create the base channel if secure_channel: channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) else: channel = grpc.insecure_channel(host_address) - if metadata is not None and len(metadata) > 0: - interceptors = [DefaultClientInterceptorImpl(metadata)] + # Apply interceptors ONLY if they exist + if interceptors: channel = grpc.intercept_channel(channel, *interceptors) return channel + def get_logger( name_suffix: str, log_handler: Optional[logging.Handler] = None, @@ -98,7 +106,7 @@ def default(self, obj): if dataclasses.is_dataclass(obj): # Dataclasses are not serializable by default, so we convert them to a dict and mark them for # automatic deserialization by the receiver - d = dataclasses.asdict(obj) # type: ignore + d = dataclasses.asdict(obj) # type: ignore d[AUTO_SERIALIZED] = True return d elif isinstance(obj, SimpleNamespace): diff --git a/durabletask/task.py b/durabletask/task.py index a40602b..9e8a08a 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -277,6 +277,7 @@ def get_tasks(self) -> list[Task]: def on_child_completed(self, task: Task[T]): pass + class WhenAllTask(CompositeTask[list[T]]): """A task that completes when all of its child tasks complete.""" @@ -333,7 +334,7 @@ class RetryableTask(CompletableTask[T]): """A task that can be retried according to a retry policy.""" def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, - start_time:datetime, is_sub_orch: bool) -> None: + start_time: datetime, is_sub_orch: bool) -> None: super().__init__() self._action = action self._retry_policy = retry_policy @@ -343,7 +344,7 @@ def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, def increment_attempt_count(self) -> None: self._attempt_count += 1 - + def compute_next_delay(self) -> Optional[timedelta]: if 
self._attempt_count >= self._retry_policy.max_number_of_attempts: return None @@ -351,7 +352,7 @@ def compute_next_delay(self) -> Optional[timedelta]: retry_expiration: datetime = datetime.max if self._retry_policy.retry_timeout is not None and self._retry_policy.retry_timeout != datetime.max: retry_expiration = self._start_time + self._retry_policy.retry_timeout - + if self._retry_policy.backoff_coefficient is None: backoff_coefficient = 1.0 else: diff --git a/durabletask/worker.py b/durabletask/worker.py index 75e2e37..2c31e52 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -9,7 +9,7 @@ from typing import Any, Generator, Optional, Sequence, TypeVar, Union import grpc -from google.protobuf import empty_pb2, wrappers_pb2 +from google.protobuf import empty_pb2 import durabletask.internal.helpers as ph import durabletask.internal.helpers as pbh @@ -17,6 +17,7 @@ import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared from durabletask import task +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl TInput = TypeVar('TInput') TOutput = TypeVar('TOutput') @@ -82,21 +83,32 @@ class ActivityNotRegisteredError(ValueError): class TaskHubGrpcWorker: _response_stream: Optional[grpc.Future] = None + _interceptors: Optional[list[shared.ClientInterceptor]] = None def __init__(self, *, host_address: Optional[str] = None, metadata: Optional[list[tuple[str, str]]] = None, log_handler=None, log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False): + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None): self._registry = _Registry() self._host_address = host_address if host_address else shared.get_default_host_address() - self._metadata = metadata self._logger = shared.get_logger("worker", log_handler, log_formatter) self._shutdown = Event() self._is_running = False self._secure_channel = secure_channel + # 
Determine the interceptors to use + if interceptors is not None: + self._interceptors = list(interceptors) + if metadata: + self._interceptors.append(DefaultClientInterceptorImpl(metadata)) + elif metadata: + self._interceptors = [DefaultClientInterceptorImpl(metadata)] + else: + self._interceptors = None + def __enter__(self): return self @@ -117,7 +129,7 @@ def add_activity(self, fn: task.Activity) -> str: def start(self): """Starts the worker on a background thread and begins listening for work items.""" - channel = shared.get_grpc_channel(self._host_address, self._metadata, self._secure_channel) + channel = shared.get_grpc_channel(self._host_address, self._secure_channel, self._interceptors) stub = stubs.TaskHubSidecarServiceStub(channel) if self._is_running: @@ -143,9 +155,11 @@ def run_loop(): request_type = work_item.WhichOneof('request') self._logger.debug(f'Received "{request_type}" work item') if work_item.HasField('orchestratorRequest'): - executor.submit(self._execute_orchestrator, work_item.orchestratorRequest, stub) + executor.submit(self._execute_orchestrator, work_item.orchestratorRequest, stub, work_item.completionToken) elif work_item.HasField('activityRequest'): - executor.submit(self._execute_activity, work_item.activityRequest, stub) + executor.submit(self._execute_activity, work_item.activityRequest, stub, work_item.completionToken) + elif work_item.HasField('healthPing'): + pass # no-op else: self._logger.warning(f'Unexpected work item type: {request_type}') @@ -184,26 +198,27 @@ def stop(self): self._logger.info("Worker shutdown completed") self._is_running = False - def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHubSidecarServiceStub): + def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken): try: executor = _OrchestrationExecutor(self._registry, self._logger) result = executor.execute(req.instanceId, req.pastEvents, req.newEvents) res = 
pb.OrchestratorResponse( instanceId=req.instanceId, actions=result.actions, - customStatus=pbh.get_string_value(result.encoded_custom_status)) + customStatus=pbh.get_string_value(result.encoded_custom_status), + completionToken=completionToken) except Exception as ex: self._logger.exception(f"An error occurred while trying to execute instance '{req.instanceId}': {ex}") failure_details = pbh.new_failure_details(ex) actions = [pbh.new_complete_orchestration_action(-1, pb.ORCHESTRATION_STATUS_FAILED, "", failure_details)] - res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=actions) + res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=actions, completionToken=completionToken) try: stub.CompleteOrchestratorTask(res) except Exception as ex: self._logger.exception(f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}") - def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarServiceStub): + def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken): instance_id = req.orchestrationInstance.instanceId try: executor = _ActivityExecutor(self._registry, self._logger) @@ -211,12 +226,14 @@ def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarS res = pb.ActivityResponse( instanceId=instance_id, taskId=req.taskId, - result=pbh.get_string_value(result)) + result=pbh.get_string_value(result), + completionToken=completionToken) except Exception as ex: res = pb.ActivityResponse( instanceId=instance_id, taskId=req.taskId, - failureDetails=pbh.new_failure_details(ex)) + failureDetails=pbh.new_failure_details(ex), + completionToken=completionToken) try: stub.CompleteActivityTask(res) @@ -471,6 +488,7 @@ def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: self.actions = actions self.encoded_custom_status = encoded_custom_status + class _OrchestrationExecutor: _generator: Optional[task.Orchestrator] = None 
diff --git a/examples/README.md b/examples/README.md index ec9088f..7cfbc7a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -8,7 +8,7 @@ All the examples assume that you have a Durable Task-compatible sidecar running 1. Install the latest version of the [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/), which contains and exposes an embedded version of the Durable Task engine. The setup process (which requires Docker) will configure the workflow engine to store state in a local Redis container. -1. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. +2. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. ## Running the examples diff --git a/examples/dts/README.md b/examples/dts/README.md new file mode 100644 index 0000000..9b4a3fd --- /dev/null +++ b/examples/dts/README.md @@ -0,0 +1,55 @@ +# Examples + +This directory contains examples of how to author durable orchestrations using the Durable Task Python SDK in conjunction with the Durable Task Scheduler (DTS). Please note that the installation instructions provided below will use the version of DTS directly from the your branch rather than installing through PyPI. + +## Prerequisites + +All the examples assume that you have a Durable Task Scheduler taskhub created. + +The simplest way to create a taskhub is by using the az cli commands: + +1. Create a scheduler: + az durabletask scheduler create --resource-group --name --location --ip-allowlist "[0.0.0.0/0]" --sku-capacity 1 --sku-name "Dedicated" --tags "{}" + +1. Create your taskhub + + ```bash + az durabletask taskhub create --resource-group --scheduler-name --name + ``` + +1. Retrieve the endpoint for the scheduler. 
This can be done by locating the taskhub in the portal. + +1. Set the appropriate environment variables for the TASKHUB and ENDPOINT + + ```bash + export TASKHUB= + export ENDPOINT= + ``` + +1. Since the samples rely on azure identity, ensure the package is installed and up-to-date + + ```bash + python3 -m pip install azure-identity + ``` + +1. Install the correct packages from the top level of this repository, i.e. durabletask-python/ + + ```bash + python3 -m pip install . + ``` + +1. Install the DTS specific packages from the durabletask-python/durabletask-azuremanaged directory + + ```bash + pip3 install -e . + ``` + +1. Grant yourself the `Durable Task Data Contributor` role over your scheduler + +## Running the examples + +Now, you can simply execute any of the examples in this directory using `python3`: + +```sh +python3 dts_activity_sequence.py +``` diff --git a/examples/dts/dts_activity_sequence.py b/examples/dts/dts_activity_sequence.py new file mode 100644 index 0000000..2ff3c22 --- /dev/null +++ b/examples/dts/dts_activity_sequence.py @@ -0,0 +1,71 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that calls an activity function in a sequence and prints the outputs.""" +import os + +from azure.identity import DefaultAzureCredential + +from durabletask import client, task +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + + +def hello(ctx: task.ActivityContext, name: str) -> str: + """Activity function that returns a greeting""" + return f'Hello {name}!' 
+ + +def sequence(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'hello' activity function in a sequence""" + # call "hello" activity function in a sequence + result1 = yield ctx.call_activity(hello, input='Tokyo') + result2 = yield ctx.call_activity(hello, input='Seattle') + result3 = yield ctx.call_activity(hello, input='London') + + # return an array of results + return [result1, result2, result3] + + +# Read the environment variable +taskhub_name = os.getenv("TASKHUB") + +# Check if the variable exists +if taskhub_name: + print(f"The value of TASKHUB is: {taskhub_name}") +else: + print("TASKHUB is not set. Please set the TASKHUB environment variable to the name of the taskhub you wish to use") + print("If you are using windows powershell, run the following: $env:TASKHUB=\"\"") + print("If you are using bash, run the following: export TASKHUB=\"\"") + exit() + +# Read the environment variable +endpoint = os.getenv("ENDPOINT") + +# Check if the variable exists +if endpoint: + print(f"The value of ENDPOINT is: {endpoint}") +else: + print("ENDPOINT is not set. 
Please set the ENDPOINT environment variable to the endpoint of the scheduler") + print("If you are using windows powershell, run the following: $env:ENDPOINT=\"\"") + print("If you are using bash, run the following: export ENDPOINT=\"\"") + exit() + +# Note that any azure-identity credential type and configuration can be used here as DTS supports various credential +# types such as Managed Identities +credential = DefaultAzureCredential() + +# configure and start the worker +with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=credential) as w: + w.add_orchestrator(sequence) + w.add_activity(hello) + w.start() + + # Construct the client and run the orchestrations + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=credential) + instance_id = c.schedule_new_orchestration(sequence) + state = c.wait_for_orchestration_completion(instance_id, timeout=60) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! 
Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') diff --git a/examples/dts/dts_fanout_fanin.py b/examples/dts/dts_fanout_fanin.py new file mode 100644 index 0000000..8ab68df --- /dev/null +++ b/examples/dts/dts_fanout_fanin.py @@ -0,0 +1,96 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that a dynamic number activity functions in parallel, waits for them all +to complete, and prints an aggregate summary of the outputs.""" +import os +import random +import time + +from azure.identity import DefaultAzureCredential + +from durabletask import client, task +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + + +def get_work_items(ctx: task.ActivityContext, _) -> list[str]: + """Activity function that returns a list of work items""" + # return a random number of work items + count = random.randint(2, 10) + print(f'generating {count} work items...') + return [f'work item {i}' for i in range(count)] + + +def process_work_item(ctx: task.ActivityContext, item: str) -> int: + """Activity function that returns a result for a given work item""" + print(f'processing work item: {item}') + + # simulate some work that takes a variable amount of time + time.sleep(random.random() * 5) + + # return a result for the given work item, which is also a random number in this case + return random.randint(0, 10) + + +def orchestrator(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'get_work_items' and 'process_work_item' + activity functions in parallel, waits for them all to complete, and prints + an aggregate summary of the outputs""" + + work_items: list[str] = yield ctx.call_activity(get_work_items) + + # execute the work-items in parallel and wait for them all to return + tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items] + results: list[int] = 
yield task.when_all(tasks) + + # return an aggregate summary of the results + return { + 'work_items': work_items, + 'results': results, + 'total': sum(results), + } + + +# Read the environment variable +taskhub_name = os.getenv("TASKHUB") + +# Check if the variable exists +if taskhub_name: + print(f"The value of TASKHUB is: {taskhub_name}") +else: + print("TASKHUB is not set. Please set the TASKHUB environment variable to the name of the taskhub you wish to use") + print("If you are using windows powershell, run the following: $env:TASKHUB=\"\"") + print("If you are using bash, run the following: export TASKHUB=\"\"") + exit() + +# Read the environment variable +endpoint = os.getenv("ENDPOINT") + +# Check if the variable exists +if endpoint: + print(f"The value of ENDPOINT is: {endpoint}") +else: + print("ENDPOINT is not set. Please set the ENDPOINT environment variable to the endpoint of the scheduler") + print("If you are using windows powershell, run the following: $env:ENDPOINT=\"\"") + print("If you are using bash, run the following: export ENDPOINT=\"\"") + exit() + +credential = DefaultAzureCredential() + +# configure and start the worker +with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=credential) as w: + w.add_orchestrator(orchestrator) + w.add_activity(process_work_item) + w.add_activity(get_work_items) + w.start() + + # create a client, start an orchestration, and wait for it to finish + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=credential) + instance_id = c.schedule_new_orchestration(orchestrator) + state = c.wait_for_orchestration_completion(instance_id, timeout=30) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! 
Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') + exit() diff --git a/requirements.txt b/requirements.txt index a31419b..0da7d46 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,3 +3,5 @@ grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newe protobuf pytest pytest-cov +azure-core +azure-identity \ No newline at end of file diff --git a/tests/test_client.py b/tests/test_client.py index caacf65..64bbec8 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,36 +1,36 @@ from unittest.mock import patch, ANY -from durabletask.internal.shared import (DefaultClientInterceptorImpl, - get_default_host_address, +from durabletask.internal.shared import (get_default_host_address, get_grpc_channel) +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl HOST_ADDRESS = 'localhost:50051' METADATA = [('key1', 'value1'), ('key2', 'value2')] - +INTERCEPTORS = [DefaultClientInterceptorImpl(METADATA)] def test_get_grpc_channel_insecure(): with patch('grpc.insecure_channel') as mock_channel: - get_grpc_channel(HOST_ADDRESS, METADATA, False) + get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS) def test_get_grpc_channel_secure(): with patch('grpc.secure_channel') as mock_channel, patch( 'grpc.ssl_channel_credentials') as mock_credentials: - get_grpc_channel(HOST_ADDRESS, METADATA, True) + get_grpc_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS, mock_credentials.return_value) def test_get_grpc_channel_default_host_address(): with patch('grpc.insecure_channel') as mock_channel: - get_grpc_channel(None, METADATA, False) + get_grpc_channel(None, False, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(get_default_host_address()) def test_get_grpc_channel_with_metadata(): with patch('grpc.insecure_channel') as mock_channel, 
patch( 'grpc.intercept_channel') as mock_intercept_channel: - get_grpc_channel(HOST_ADDRESS, METADATA, False) + get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS) mock_intercept_channel.assert_called_once() @@ -48,41 +48,41 @@ def test_grpc_channel_with_host_name_protocol_stripping(): host_name = "myserver.com:1234" prefix = "grpc://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "http://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "HTTP://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "GRPC://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_insecure_channel.assert_called_with(host_name) prefix = "grpcs://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) prefix = "https://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) prefix = "HTTPS://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) prefix = "GRPCS://" - get_grpc_channel(prefix + host_name, METADATA) + get_grpc_channel(prefix + host_name, 
interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) prefix = "" - get_grpc_channel(prefix + host_name, METADATA, True) + get_grpc_channel(prefix + host_name, True, interceptors=INTERCEPTORS) mock_secure_channel.assert_called_with(host_name, ANY) \ No newline at end of file From 6d3ad8f06af017ee9286b3c2b35e80ac164f65bd Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Mon, 10 Mar 2025 13:56:24 -0700 Subject: [PATCH 19/81] Update pr-validation.yml Signed-off-by: Albert Callarisa --- .github/workflows/pr-validation.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 70ff470..4b909cf 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -8,6 +8,7 @@ on: branches: [ "main" ] pull_request: branches: [ "main" ] + merge_group: jobs: build: From 75f573bc67244856b2990d4c0916d9a868b41708 Mon Sep 17 00:00:00 2001 From: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> Date: Mon, 10 Mar 2025 14:56:59 -0600 Subject: [PATCH 20/81] Making token credential optional (#45) Signed-off-by: Ryan Lettieri Signed-off-by: Albert Callarisa --- durabletask-azuremanaged/durabletask/azuremanaged/client.py | 3 ++- .../azuremanaged/internal/durabletask_grpc_interceptor.py | 4 +++- durabletask-azuremanaged/durabletask/azuremanaged/worker.py | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/client.py b/durabletask-azuremanaged/durabletask/azuremanaged/client.py index f641eae..1d8cecd 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/client.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/client.py @@ -2,6 +2,7 @@ # Licensed under the MIT License. 
from azure.core.credentials import TokenCredential +from typing import Optional from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ DTSDefaultClientInterceptorImpl @@ -13,7 +14,7 @@ class DurableTaskSchedulerClient(TaskHubGrpcClient): def __init__(self, *, host_address: str, taskhub: str, - token_credential: TokenCredential, + token_credential: Optional[TokenCredential], secure_channel: bool = True): if not taskhub: diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py index a23cac9..077905e 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py @@ -2,6 +2,8 @@ # Licensed under the MIT License. import grpc +from typing import Optional + from azure.core.credentials import TokenCredential from durabletask.azuremanaged.internal.access_token_manager import \ @@ -15,7 +17,7 @@ class DTSDefaultClientInterceptorImpl (DefaultClientInterceptorImpl): StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an interceptor to add additional headers to all calls as needed.""" - def __init__(self, token_credential: TokenCredential, taskhub_name: str): + def __init__(self, token_credential: Optional[TokenCredential], taskhub_name: str): self._metadata = [("taskhub", taskhub_name)] super().__init__(self._metadata) diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py index d10c2f7..8bdff3d 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py @@ -2,6 +2,7 @@ # Licensed under the MIT License. 
from azure.core.credentials import TokenCredential +from typing import Optional from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ DTSDefaultClientInterceptorImpl @@ -13,7 +14,7 @@ class DurableTaskSchedulerWorker(TaskHubGrpcWorker): def __init__(self, *, host_address: str, taskhub: str, - token_credential: TokenCredential, + token_credential: Optional[TokenCredential], secure_channel: bool = True): if not taskhub: From aae026732bfb8bb54928d1c5a9052d50c66c7e8e Mon Sep 17 00:00:00 2001 From: Ryan Lettieri <67934986+RyanLettieri@users.noreply.github.com> Date: Fri, 21 Mar 2025 12:22:08 -0600 Subject: [PATCH 21/81] Creation of pipeline to publish dts python package to pypi (#43) * Creating of pipeline to publish dts python package to pypi Signed-off-by: Ryan Lettieri * Upgrading version of durabletask-azuremanaged from 0.1b1 to 0.1 Signed-off-by: Ryan Lettieri * Updating versioning on packages Signed-off-by: Ryan Lettieri * Incrementing version to allign with pypi Signed-off-by: Ryan Lettieri * Adressing majority of first round of feedback Signed-off-by: Ryan Lettieri * Updating pipeline to have linting Signed-off-by: Ryan Lettieri * Updating versions in pyproject.toml Signed-off-by: Ryan Lettieri * Updating working dirs in yml Signed-off-by: Ryan Lettieri * Adding requirements.txt Signed-off-by: Ryan Lettieri * Moving durabletask tests into specific dir and more Signed-off-by: Ryan Lettieri * Fixing more paths Signed-off-by: Ryan Lettieri * ATtemptign to ignore durabletask-azuremanaged folder Signed-off-by: Ryan Lettieri * installing dts dependencies Signed-off-by: Ryan Lettieri * Changing path for requirements.txt Signed-off-by: Ryan Lettieri * Moving init.py Signed-off-by: Ryan Lettieri * Updating readme and some tests Signed-off-by: Ryan Lettieri * Running all dts tests in publish pipeline Signed-off-by: Ryan Lettieri * Removing PYTHONPATH and installing regular deps Signed-off-by: Ryan Lettieri * Adding timeout to dts orchestration e2e 
test Signed-off-by: Ryan Lettieri * Removing suspend and continue as new tests from dts Signed-off-by: Ryan Lettieri * Removing raise event timeout tests Signed-off-by: Ryan Lettieri * Only runnign publish on tag push Signed-off-by: Ryan Lettieri * Changing dts action to run on tag creation Signed-off-by: Ryan Lettieri * Updating tag name Signed-off-by: Ryan Lettieri * Adressing review feedback Signed-off-by: Ryan Lettieri * Fixing run requirements in actions and adding exit-zero Signed-off-by: Ryan Lettieri * Update .github/workflows/publish-dts-sdk.yml --------- Signed-off-by: Ryan Lettieri Co-authored-by: Bernd Verst Signed-off-by: Albert Callarisa --- .github/workflows/pr-validation.yml | 109 ++-- .github/workflows/publish-dts-sdk.yml | 110 ++++ durabletask-azuremanaged/pyproject.toml | 2 +- examples/dts/README.md | 30 +- examples/dts/requirements.txt | 6 + pyproject.toml | 2 +- tests/durabletask-azuremanaged/__init__.py | 0 .../test_dts_activity_sequence.py | 69 +++ .../test_dts_orchestration_e2e.py | 503 ++++++++++++++++++ .../test_activity_executor.py | 0 tests/{ => durabletask}/test_client.py | 0 .../test_orchestration_e2e.py | 0 .../test_orchestration_executor.py | 0 13 files changed, 777 insertions(+), 54 deletions(-) create mode 100644 .github/workflows/publish-dts-sdk.yml create mode 100644 examples/dts/requirements.txt create mode 100644 tests/durabletask-azuremanaged/__init__.py create mode 100644 tests/durabletask-azuremanaged/test_dts_activity_sequence.py create mode 100644 tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py rename tests/{ => durabletask}/test_activity_executor.py (100%) rename tests/{ => durabletask}/test_client.py (100%) rename tests/{ => durabletask}/test_orchestration_e2e.py (100%) rename tests/{ => durabletask}/test_orchestration_executor.py (100%) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 4b909cf..dddcc53 100644 --- a/.github/workflows/pr-validation.yml +++ 
b/.github/workflows/pr-validation.yml @@ -1,51 +1,58 @@ -# This workflow will install Python dependencies, run tests and lint with a variety of Python versions -# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python - -name: Build Validation - -on: - push: - branches: [ "main" ] - pull_request: - branches: [ "main" ] - merge_group: - -jobs: - build: - - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] - - steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install flake8 pytest - pip install -r requirements.txt - - name: Lint with flake8 - run: | - flake8 . --count --show-source --statistics --exit-zero - - name: Pytest unit tests - run: | - pytest -m "not e2e" --verbose - - # Sidecar for running e2e tests requires Go SDK - - name: Install Go SDK - uses: actions/setup-go@v5 - with: - go-version: 'stable' - - # Install and run the durabletask-go sidecar for running e2e tests - - name: Pytest e2e tests - run: | - go install github.com/microsoft/durabletask-go@main - durabletask-go --port 4001 & - pytest -m "e2e" --verbose +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: Build Validation + +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + merge_group: + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + 
with: + python-version: ${{ matrix.python-version }} + - name: Install durabletask dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest + pip install -r requirements.txt + - name: Install durabletask-azuremanaged dependencies + working-directory: examples/dts + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + - name: Lint with flake8 + run: | + flake8 . --count --show-source --statistics --exit-zero + - name: Pytest unit tests + working-directory: tests/durabletask + run: | + pytest -m "not e2e and not dts" --verbose + + # Sidecar for running e2e tests requires Go SDK + - name: Install Go SDK + uses: actions/setup-go@v5 + with: + go-version: 'stable' + + # Install and run the durabletask-go sidecar for running e2e tests + - name: Pytest e2e tests + working-directory: tests/durabletask + run: | + go install github.com/microsoft/durabletask-go@main + durabletask-go --port 4001 & + pytest -m "e2e and not dts" --verbose diff --git a/.github/workflows/publish-dts-sdk.yml b/.github/workflows/publish-dts-sdk.yml new file mode 100644 index 0000000..de773f2 --- /dev/null +++ b/.github/workflows/publish-dts-sdk.yml @@ -0,0 +1,110 @@ +name: Publish Durable Task Scheduler to PyPI + +on: + push: + branches: + - "main" + tags: + - "azuremanaged-v*" # Only run for tags starting with "azuremanaged-v" + pull_request: + branches: + - "main" + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Install dependencies + working-directory: durabletask-azuremanaged + run: | + python -m pip install --upgrade pip + pip install setuptools wheel tox + pip install flake8 + - name: Run flake8 Linter + working-directory: durabletask-azuremanaged + run: flake8 . 
+ + run-docker-tests: + env: + EMULATOR_VERSION: "v0.0.5" # Define the variable + needs: lint + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Pull Docker image + run: docker pull mcr.microsoft.com/dts/dts-emulator:$EMULATOR_VERSION + + - name: Run Docker container + run: | + docker run --name dtsemulator -d -p 8080:8080 mcr.microsoft.com/dts/dts-emulator:$EMULATOR_VERSION + + - name: Wait for container to be ready + run: sleep 10 # Adjust if your service needs more time to start + + - name: Set environment variables + run: | + echo "TASKHUB=default" >> $GITHUB_ENV + echo "ENDPOINT=http://localhost:8080" >> $GITHUB_ENV + + - name: Install durabletask dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest + pip install -r requirements.txt + + - name: Install durabletask-azuremanaged dependencies + working-directory: examples/dts + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + + - name: Run the tests + working-directory: tests/durabletask-azuremanaged + run: | + pytest -m "dts" --verbose + + publish: + if: startsWith(github.ref, 'refs/tags/azuremanaged-v') # Only run if a matching tag is pushed + needs: run-docker-tests + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Extract version from tag + run: echo "VERSION=${GITHUB_REF#refs/tags/azuremanaged-v}" >> $GITHUB_ENV # Extract version from the tag + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" # Adjust Python version as needed + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build twine + + - name: Build package from directory durabletask-azuremanaged + working-directory: durabletask-azuremanaged + run: | + python -m build + + - name: Check package + working-directory: durabletask-azuremanaged + run: | + twine check dist/* + + - name: Publish package to PyPI + 
env: + TWINE_USERNAME: __token__ + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN_AZUREMANAGED }} # Store your PyPI API token in GitHub Secrets + working-directory: durabletask-azuremanaged + run: | + twine upload dist/* \ No newline at end of file diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml index ac6be6f..c4c8a96 100644 --- a/durabletask-azuremanaged/pyproject.toml +++ b/durabletask-azuremanaged/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask.azuremanaged" -version = "0.1b1" +version = "0.1.2" description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" keywords = [ "durable", diff --git a/examples/dts/README.md b/examples/dts/README.md index 9b4a3fd..8df2b75 100644 --- a/examples/dts/README.md +++ b/examples/dts/README.md @@ -4,8 +4,13 @@ This directory contains examples of how to author durable orchestrations using t ## Prerequisites -All the examples assume that you have a Durable Task Scheduler taskhub created. +There are 2 separate ways to run an example: +1. Using the emulator. +2. Using a real scheduler and taskhub. +All the examples by defualt assume that you have a Durable Task Scheduler taskhub created. + +## Running with a scheduler and taskhub resource The simplest way to create a taskhub is by using the az cli commands: 1. Create a scheduler: @@ -46,6 +51,29 @@ The simplest way to create a taskhub is by using the az cli commands: 1. Grant yourself the `Durable Task Data Contributor` role over your scheduler +## Running with the emulator +The emulator is a simulation of a scheduler and taskhub. It is the 'backend' of the durabletask-azuremanaged system packaged up into an easy to use docker container. For these steps, it is assumed that you are using port 8080. + +In order to use the emulator for the examples, perform the following steps: +1. Install docker if it is not already installed. + +2. 
Pull down the docker image for the emulator: + `docker pull mcr.microsoft.com/dts/dts-emulator:v0.0.4` + +3. Run the emulator and wait a few seconds for the container to be ready: +`docker run --name dtsemulator -d -p 8080:8080 mcr.microsoft.com/dts/dts-emulator:v0.0.4` + +4. Set the environment variables that are referenced and used in the examples: + 1. If you are using windows powershell: + `$env:TASKHUB="default"` + `$env:ENDPOINT="http://localhost:8080"` + 2. If you are using bash: + `export TASKHUB=default` + `export ENDPOINT=http://localhost:8080` + +5. Finally, edit the examples to change the `token_credential` input of both the `DurableTaskSchedulerWorker` and `DurableTaskSchedulerClient` to a value of `None` + + ## Running the examples Now, you can simply execute any of the examples in this directory using `python3`: diff --git a/examples/dts/requirements.txt b/examples/dts/requirements.txt new file mode 100644 index 0000000..b12d5a2 --- /dev/null +++ b/examples/dts/requirements.txt @@ -0,0 +1,6 @@ +autopep8 +grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible +protobuf +azure-identity +durabletask-azuremanaged +durabletask \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 577824b..d3d9429 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask" -version = "0.2b1" +version = "0.2.0" description = "A Durable Task Client SDK for Python" keywords = [ "durable", diff --git a/tests/durabletask-azuremanaged/__init__.py b/tests/durabletask-azuremanaged/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/durabletask-azuremanaged/test_dts_activity_sequence.py b/tests/durabletask-azuremanaged/test_dts_activity_sequence.py new file mode 100644 index 0000000..c875e49 --- /dev/null +++ b/tests/durabletask-azuremanaged/test_dts_activity_sequence.py @@ -0,0 +1,69 @@ 
+"""End-to-end sample that demonstrates how to configure an orchestrator +that calls an activity function in a sequence and prints the outputs.""" +import os + +from durabletask import client, task +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + +import pytest + + +pytestmark = pytest.mark.dts + +def hello(ctx: task.ActivityContext, name: str) -> str: + """Activity function that returns a greeting""" + return f'Hello {name}!' + + +def sequence(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'hello' activity function in a sequence""" + # call "hello" activity function in a sequence + result1 = yield ctx.call_activity(hello, input='Tokyo') + result2 = yield ctx.call_activity(hello, input='Seattle') + result3 = yield ctx.call_activity(hello, input='London') + + # return an array of results + return [result1, result2, result3] + + +# Read the environment variable +taskhub_name = os.getenv("TASKHUB") + +# Check if the variable exists +if taskhub_name: + print(f"The value of TASKHUB is: {taskhub_name}") +else: + print("TASKHUB is not set. Please set the TASKHUB environment variable to the name of the taskhub you wish to use") + print("If you are using windows powershell, run the following: $env:TASKHUB=\"\"") + print("If you are using bash, run the following: export TASKHUB=\"\"") + exit() + +# Read the environment variable +endpoint = os.getenv("ENDPOINT") + +# Check if the variable exists +if endpoint: + print(f"The value of ENDPOINT is: {endpoint}") +else: + print("ENDPOINT is not set. 
Please set the ENDPOINT environment variable to the endpoint of the scheduler") + print("If you are using windows powershell, run the following: $env:ENDPOINT=\"\"") + print("If you are using bash, run the following: export ENDPOINT=\"\"") + exit() + +# configure and start the worker +with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(sequence) + w.add_activity(hello) + w.start() + + # Construct the client and run the orchestrations + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + instance_id = c.schedule_new_orchestration(sequence) + state = c.wait_for_orchestration_completion(instance_id, timeout=60) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') diff --git a/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py b/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py new file mode 100644 index 0000000..f10e605 --- /dev/null +++ b/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py @@ -0,0 +1,503 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import json +import threading +import time +import os +from datetime import timedelta + +import pytest + +from durabletask import client, task +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + +# NOTE: These tests assume a sidecar process is running. 
Example command: +# docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator +pytestmark = pytest.mark.dts + +# Read the environment variables +taskhub_name = os.getenv("TASKHUB", "default") +endpoint = os.getenv("ENDPOINT", "http://localhost:8080") + +def test_empty_orchestration(): + + invoked = False + + def empty_orchestrator(ctx: task.OrchestrationContext, _): + nonlocal invoked # don't do this in a real app! + invoked = True + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(empty_orchestrator) + w.start() + + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = c.schedule_new_orchestration(empty_orchestrator) + state = c.wait_for_orchestration_completion(id, timeout=30) + + assert invoked + assert state is not None + assert state.name == task.get_name(empty_orchestrator) + assert state.instance_id == id + assert state.failure_details is None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_input is None + assert state.serialized_output is None + assert state.serialized_custom_status is None + + +def test_activity_sequence(): + + def plus_one(_: task.ActivityContext, input: int) -> int: + return input + 1 + + def sequence(ctx: task.OrchestrationContext, start_val: int): + numbers = [start_val] + current = start_val + for _ in range(10): + current = yield ctx.call_activity(plus_one, input=current) + numbers.append(current) + return numbers + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + 
w.add_orchestrator(sequence) + w.add_activity(plus_one) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(sequence, input=1) + state = task_hub_client.wait_for_orchestration_completion( + id, timeout=30) + + assert state is not None + assert state.name == task.get_name(sequence) + assert state.instance_id == id + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert state.serialized_input == json.dumps(1) + assert state.serialized_output == json.dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + assert state.serialized_custom_status is None + + +def test_activity_error_handling(): + + def throw(_: task.ActivityContext, input: int) -> int: + raise RuntimeError("Kah-BOOOOM!!!") + + compensation_counter = 0 + + def increment_counter(ctx, _): + nonlocal compensation_counter + compensation_counter += 1 + + def orchestrator(ctx: task.OrchestrationContext, input: int): + error_msg = "" + try: + yield ctx.call_activity(throw, input=input) + except task.TaskFailedError as e: + error_msg = e.details.message + + # compensating actions + yield ctx.call_activity(increment_counter) + yield ctx.call_activity(increment_counter) + + return error_msg + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(orchestrator) + w.add_activity(throw) + w.add_activity(increment_counter) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + 
assert state.name == task.get_name(orchestrator) + assert state.instance_id == id + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps("Kah-BOOOOM!!!") + assert state.failure_details is None + assert state.serialized_custom_status is None + assert compensation_counter == 2 + + +def test_sub_orchestration_fan_out(): + threadLock = threading.Lock() + activity_counter = 0 + + def increment(ctx, _): + with threadLock: + nonlocal activity_counter + activity_counter += 1 + + def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int): + for _ in range(activity_count): + yield ctx.call_activity(increment) + + def parent_orchestrator(ctx: task.OrchestrationContext, count: int): + # Fan out to multiple sub-orchestrations + tasks = [] + for _ in range(count): + tasks.append(ctx.call_sub_orchestrator( + orchestrator_child, input=3)) + # Wait for all sub-orchestrations to complete + yield task.when_all(tasks) + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_activity(increment) + w.add_orchestrator(orchestrator_child) + w.add_orchestrator(parent_orchestrator) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert activity_counter == 30 + + +def test_wait_for_multiple_external_events(): + def orchestrator(ctx: task.OrchestrationContext, _): + a = yield ctx.wait_for_external_event('A') + b = yield ctx.wait_for_external_event('B') + c = yield 
ctx.wait_for_external_event('C') + return [a, b, c] + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(orchestrator) + w.start() + + # Start the orchestration and immediately raise events to it. + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(orchestrator) + task_hub_client.raise_orchestration_event(id, 'A', data='a') + task_hub_client.raise_orchestration_event(id, 'B', data='b') + task_hub_client.raise_orchestration_event(id, 'C', data='c') + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(['a', 'b', 'c']) + + +# @pytest.mark.parametrize("raise_event", [True, False]) +# def test_wait_for_external_event_timeout(raise_event: bool): +# def orchestrator(ctx: task.OrchestrationContext, _): +# approval: task.Task[bool] = ctx.wait_for_external_event('Approval') +# timeout = ctx.create_timer(timedelta(seconds=3)) +# winner = yield task.when_any([approval, timeout]) +# if winner == approval: +# return "approved" +# else: +# return "timed out" + +# # Start a worker, which will connect to the sidecar in a background thread +# with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) as w: +# w.add_orchestrator(orchestrator) +# w.start() + +# # Start the orchestration and immediately raise events to it. 
+# task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) +# id = task_hub_client.schedule_new_orchestration(orchestrator) +# if raise_event: +# task_hub_client.raise_orchestration_event(id, 'Approval') +# state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + +# assert state is not None +# assert state.runtime_status == client.OrchestrationStatus.COMPLETED +# if raise_event: +# assert state.serialized_output == json.dumps("approved") +# else: +# assert state.serialized_output == json.dumps("timed out") + + +# def test_suspend_and_resume(): +# def orchestrator(ctx: task.OrchestrationContext, _): +# result = yield ctx.wait_for_external_event("my_event") +# return result + +# # Start a worker, which will connect to the sidecar in a background thread +# with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) as w: +# w.add_orchestrator(orchestrator) +# w.start() + +# task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) +# id = task_hub_client.schedule_new_orchestration(orchestrator) +# state = task_hub_client.wait_for_orchestration_start(id, timeout=30) +# assert state is not None + +# # Suspend the orchestration and wait for it to go into the SUSPENDED state +# task_hub_client.suspend_orchestration(id) +# counter = 0 +# while state.runtime_status == client.OrchestrationStatus.RUNNING and counter < 1200: +# time.sleep(0.1) +# state = task_hub_client.get_orchestration_state(id) +# assert state is not None +# counter+=1 +# assert state.runtime_status == client.OrchestrationStatus.SUSPENDED + +# # Raise an event to the orchestration and confirm that it does NOT complete +# task_hub_client.raise_orchestration_event(id, "my_event", data=42) +# try: +# state = task_hub_client.wait_for_orchestration_completion(id, timeout=3) 
+# assert False, "Orchestration should not have completed" +# except TimeoutError: +# pass + +# # Resume the orchestration and wait for it to complete +# task_hub_client.resume_orchestration(id) +# state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) +# assert state is not None +# assert state.runtime_status == client.OrchestrationStatus.COMPLETED +# assert state.serialized_output == json.dumps(42) + + +def test_terminate(): + def orchestrator(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(orchestrator) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(orchestrator) + state = task_hub_client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.RUNNING + + task_hub_client.terminate_orchestration(id, output="some reason for termination") + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.TERMINATED + assert state.serialized_output == json.dumps("some reason for termination") + +def test_terminate_recursive(): + def root(ctx: task.OrchestrationContext, _): + result = yield ctx.call_sub_orchestrator(child) + return result + def child(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, 
token_credential=None) as w: + w.add_orchestrator(root) + w.add_orchestrator(child) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(root) + state = task_hub_client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.RUNNING + + # Terminate root orchestration(recursive set to True by default) + task_hub_client.terminate_orchestration(id, output="some reason for termination") + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.TERMINATED + + # Verify that child orchestration is also terminated + c = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.TERMINATED + + task_hub_client.purge_orchestration(id) + state = task_hub_client.get_orchestration_state(id) + assert state is None + + +# def test_continue_as_new(): +# all_results = [] + +# def orchestrator(ctx: task.OrchestrationContext, input: int): +# result = yield ctx.wait_for_external_event("my_event") +# if not ctx.is_replaying: +# # NOTE: Real orchestrations should never interact with nonlocal variables like this. 
+# nonlocal all_results +# all_results.append(result) + +# if len(all_results) <= 4: +# ctx.continue_as_new(max(all_results), save_events=True) +# else: +# return all_results + +# # Start a worker, which will connect to the sidecar in a background thread +# with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) as w: +# w.add_orchestrator(orchestrator) +# w.start() + +# task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, +# taskhub=taskhub_name, token_credential=None) +# id = task_hub_client.schedule_new_orchestration(orchestrator, input=0) +# task_hub_client.raise_orchestration_event(id, "my_event", data=1) +# task_hub_client.raise_orchestration_event(id, "my_event", data=2) +# task_hub_client.raise_orchestration_event(id, "my_event", data=3) +# task_hub_client.raise_orchestration_event(id, "my_event", data=4) +# task_hub_client.raise_orchestration_event(id, "my_event", data=5) + +# state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) +# assert state is not None +# assert state.runtime_status == client.OrchestrationStatus.COMPLETED +# assert state.serialized_output == json.dumps(all_results) +# assert state.serialized_input == json.dumps(4) +# assert all_results == [1, 2, 3, 4, 5] + + +# NOTE: This test fails when running against durabletask-go with sqlite because the sqlite backend does not yet +# support orchestration ID reuse. This gap is being tracked here: +# https://github.com/microsoft/durabletask-go/issues/42 +def test_retry_policies(): + # This test verifies that the retry policies are working as expected. + # It does this by creating an orchestration that calls a sub-orchestrator, + # which in turn calls an activity that always fails. + # In this test, the retry policies are added, and the orchestration + # should still fail. 
But, number of times the sub-orchestrator and activity + # is called should increase as per the retry policies. + + child_orch_counter = 0 + throw_activity_counter = 0 + + # Second setup: With retry policies + retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + backoff_coefficient=1, + max_retry_interval=timedelta(seconds=10), + retry_timeout=timedelta(seconds=30)) + + def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _): + yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy) + + def child_orchestrator_with_retry(ctx: task.OrchestrationContext, _): + nonlocal child_orch_counter + if not ctx.is_replaying: + # NOTE: Real orchestrations should never interact with nonlocal variables like this. + # This is done only for testing purposes. + child_orch_counter += 1 + yield ctx.call_activity(throw_activity_with_retry, retry_policy=retry_policy) + + def throw_activity_with_retry(ctx: task.ActivityContext, _): + nonlocal throw_activity_counter + throw_activity_counter += 1 + raise RuntimeError("Kah-BOOOOM!!!") + + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(parent_orchestrator_with_retry) + w.add_orchestrator(child_orchestrator_with_retry) + w.add_activity(throw_activity_with_retry) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.FAILED + assert state.failure_details is not None + assert state.failure_details.error_type == "TaskFailedError" + assert state.failure_details.message.startswith("Sub-orchestration task #1 
failed:") + assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") + assert state.failure_details.stack_trace is not None + assert throw_activity_counter == 9 + assert child_orch_counter == 3 + + +def test_retry_timeout(): + # This test verifies that the retry timeout is working as expected. + # Max number of attempts is 5 and retry timeout is 14 seconds. + # Total seconds consumed till 4th attempt is 1 + 2 + 4 + 8 = 15 seconds. + # So, the 5th attempt should not be made and the orchestration should fail. + throw_activity_counter = 0 + retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=5, + backoff_coefficient=2, + max_retry_interval=timedelta(seconds=10), + retry_timeout=timedelta(seconds=14)) + + def mock_orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_activity(throw_activity, retry_policy=retry_policy) + + def throw_activity(ctx: task.ActivityContext, _): + nonlocal throw_activity_counter + throw_activity_counter += 1 + raise RuntimeError("Kah-BOOOOM!!!") + + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(mock_orchestrator) + w.add_activity(throw_activity) + w.start() + + task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = task_hub_client.schedule_new_orchestration(mock_orchestrator) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.FAILED + assert state.failure_details is not None + assert state.failure_details.error_type == "TaskFailedError" + assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") + assert state.failure_details.stack_trace is not None + assert throw_activity_counter == 4 + +def test_custom_status(): + + def 
empty_orchestrator(ctx: task.OrchestrationContext, _): + ctx.set_custom_status("foobaz") + + # Start a worker, which will connect to the sidecar in a background thread + with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) as w: + w.add_orchestrator(empty_orchestrator) + w.start() + + c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, + taskhub=taskhub_name, token_credential=None) + id = c.schedule_new_orchestration(empty_orchestrator) + state = c.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.name == task.get_name(empty_orchestrator) + assert state.instance_id == id + assert state.failure_details is None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_input is None + assert state.serialized_output is None + assert state.serialized_custom_status == "\"foobaz\"" diff --git a/tests/test_activity_executor.py b/tests/durabletask/test_activity_executor.py similarity index 100% rename from tests/test_activity_executor.py rename to tests/durabletask/test_activity_executor.py diff --git a/tests/test_client.py b/tests/durabletask/test_client.py similarity index 100% rename from tests/test_client.py rename to tests/durabletask/test_client.py diff --git a/tests/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py similarity index 100% rename from tests/test_orchestration_e2e.py rename to tests/durabletask/test_orchestration_e2e.py diff --git a/tests/test_orchestration_executor.py b/tests/durabletask/test_orchestration_executor.py similarity index 100% rename from tests/test_orchestration_executor.py rename to tests/durabletask/test_orchestration_executor.py From 62d20146c9da8a0c7e9088c7469c1389470270a0 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Wed, 26 Mar 2025 13:29:33 -0700 Subject: [PATCH 22/81] Add missing protobuf dependency Signed-off-by: Albert Callarisa --- 
.../durabletask/azuremanaged/internal/py.typed | 0 durabletask-azuremanaged/durabletask/azuremanaged/py.typed | 0 durabletask-azuremanaged/pyproject.toml | 4 ++-- pyproject.toml | 3 ++- 4 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/internal/py.typed create mode 100644 durabletask-azuremanaged/durabletask/azuremanaged/py.typed diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/py.typed b/durabletask-azuremanaged/durabletask/azuremanaged/internal/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/py.typed b/durabletask-azuremanaged/durabletask/azuremanaged/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml index c4c8a96..9e724e4 100644 --- a/durabletask-azuremanaged/pyproject.toml +++ b/durabletask-azuremanaged/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask.azuremanaged" -version = "0.1.2" +version = "0.1.3" description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" keywords = [ "durable", @@ -26,7 +26,7 @@ requires-python = ">=3.9" license = {file = "LICENSE"} readme = "README.md" dependencies = [ - "durabletask>=0.2.0", + "durabletask>=0.2.1", "azure-identity>=1.19.0" ] diff --git a/pyproject.toml b/pyproject.toml index d3d9429..60a9d37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask" -version = "0.2.0" +version = "0.2.1" description = "A Durable Task Client SDK for Python" keywords = [ "durable", @@ -26,6 +26,7 @@ license = {file = "LICENSE"} readme = "README.md" dependencies = [ "grpcio", + "protobuf" ] [project.urls] From 04fe99113baa8dc032c2ef8b42c2c5d9a0116283 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Tue, 6 
May 2025 07:32:00 -0700 Subject: [PATCH 23/81] Add user agent (#49) Signed-off-by: Albert Callarisa --- .../durabletask/azuremanaged/client.py | 8 +- .../internal/durabletask_grpc_interceptor.py | 21 +++- .../durabletask/azuremanaged/worker.py | 8 +- .../test_durabletask_grpc_interceptor.py | 108 ++++++++++++++++++ 4 files changed, 134 insertions(+), 11 deletions(-) create mode 100644 tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/client.py b/durabletask-azuremanaged/durabletask/azuremanaged/client.py index 1d8cecd..e1c2445 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/client.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/client.py @@ -1,11 +1,13 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from azure.core.credentials import TokenCredential from typing import Optional -from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ - DTSDefaultClientInterceptorImpl +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( + DTSDefaultClientInterceptorImpl, +) from durabletask.client import TaskHubGrpcClient diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py index 077905e..fa1459f 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py @@ -1,15 +1,17 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-import grpc +from importlib.metadata import version from typing import Optional +import grpc from azure.core.credentials import TokenCredential -from durabletask.azuremanaged.internal.access_token_manager import \ - AccessTokenManager +from durabletask.azuremanaged.internal.access_token_manager import AccessTokenManager from durabletask.internal.grpc_interceptor import ( - DefaultClientInterceptorImpl, _ClientCallDetails) + DefaultClientInterceptorImpl, + _ClientCallDetails, +) class DTSDefaultClientInterceptorImpl (DefaultClientInterceptorImpl): @@ -18,7 +20,16 @@ class DTSDefaultClientInterceptorImpl (DefaultClientInterceptorImpl): interceptor to add additional headers to all calls as needed.""" def __init__(self, token_credential: Optional[TokenCredential], taskhub_name: str): - self._metadata = [("taskhub", taskhub_name)] + try: + # Get the version of the azuremanaged package + sdk_version = version('durabletask-azuremanaged') + except Exception: + # Fallback if version cannot be determined + sdk_version = "unknown" + user_agent = f"durabletask-python/{sdk_version}" + self._metadata = [ + ("taskhub", taskhub_name), + ("x-user-agent", user_agent)] # 'user-agent' is a reserved header in grpc, so we use 'x-user-agent' instead super().__init__(self._metadata) if token_credential is not None: diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py index 8bdff3d..fd3b1e4 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py @@ -1,11 +1,13 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-from azure.core.credentials import TokenCredential from typing import Optional -from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ - DTSDefaultClientInterceptorImpl +from azure.core.credentials import TokenCredential + +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( + DTSDefaultClientInterceptorImpl, +) from durabletask.worker import TaskHubGrpcWorker diff --git a/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py b/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py new file mode 100644 index 0000000..62978f9 --- /dev/null +++ b/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import threading +import unittest +from concurrent import futures +from importlib.metadata import version + +import grpc + +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( + DTSDefaultClientInterceptorImpl, +) +from durabletask.internal import orchestrator_service_pb2 as pb +from durabletask.internal import orchestrator_service_pb2_grpc as stubs + + +class MockTaskHubSidecarServiceServicer(stubs.TaskHubSidecarServiceServicer): + """Mock implementation of the TaskHubSidecarService for testing.""" + + def __init__(self): + self.captured_metadata = {} + self.requests_received = 0 + + def GetInstance(self, request, context): + """Implementation of GetInstance that captures the metadata.""" + # Store all metadata key-value pairs from the context + for key, value in context.invocation_metadata(): + self.captured_metadata[key] = value + + self.requests_received += 1 + + # Return a mock response + response = pb.GetInstanceResponse(exists=False) + return response + + +class TestDurableTaskGrpcInterceptor(unittest.TestCase): + """Tests for the DTSDefaultClientInterceptorImpl class.""" + + @classmethod 
+ def setUpClass(cls): + # Start a real gRPC server on a free port + cls.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + cls.port = cls.server.add_insecure_port('[::]:0') # Bind to a random free port + cls.server_address = f"localhost:{cls.port}" + + # Add our mock service implementation to the server + cls.mock_servicer = MockTaskHubSidecarServiceServicer() + stubs.add_TaskHubSidecarServiceServicer_to_server(cls.mock_servicer, cls.server) + + # Start the server in a background thread + cls.server.start() + + @classmethod + def tearDownClass(cls): + cls.server.stop(grace=None) + + def test_user_agent_metadata_passed_in_request(self): + """Test that the user agent metadata is correctly passed in gRPC requests.""" + # Create a client that connects to our mock server + # Note: secure_channel is False and token_credential is None as specified + task_hub_client = DurableTaskSchedulerClient( + host_address=self.server_address, + secure_channel=False, + taskhub="test-taskhub", + token_credential=None + ) + + # Make a client call that will trigger our interceptor + task_hub_client.get_orchestration_state("test-instance-id") + + # Verify the request was received by our mock server + self.assertEqual(1, self.mock_servicer.requests_received, "Expected one request to be received") + + # Check if our custom x-user-agent header was correctly set + self.assertIn("x-user-agent", self.mock_servicer.captured_metadata, "x-user-agent header not found") + + # Get what we expect our user agent to be + try: + expected_version = version('durabletask-azuremanaged') + except Exception: + expected_version = "unknown" + + expected_user_agent = f"durabletask-python/{expected_version}" + self.assertEqual( + expected_user_agent, + self.mock_servicer.captured_metadata["x-user-agent"], + f"Expected x-user-agent header to be '{expected_user_agent}'" + ) + + # Check if the taskhub header was correctly set + self.assertIn("taskhub", self.mock_servicer.captured_metadata, "taskhub 
header not found") + self.assertEqual("test-taskhub", self.mock_servicer.captured_metadata["taskhub"]) + + # Verify the standard gRPC user-agent is different from our custom one + # Note: gRPC automatically adds its own "user-agent" header + self.assertIn("user-agent", self.mock_servicer.captured_metadata, "gRPC user-agent header not found") + self.assertNotEqual( + self.mock_servicer.captured_metadata["user-agent"], + self.mock_servicer.captured_metadata["x-user-agent"], + "gRPC user-agent should be different from our custom x-user-agent" + ) + + +if __name__ == "__main__": + unittest.main() From e6be3d6c8fc6228551a1c9a7fad91627435875d1 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Tue, 20 May 2025 11:22:16 -0700 Subject: [PATCH 24/81] Bump azuremanaged version for release Signed-off-by: Albert Callarisa --- dev-requirements.txt | 2 +- durabletask-azuremanaged/pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index 119f072..b3ff6f7 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1 +1 @@ -grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 1.26.X is used which has breaking changes for Python +grpcio-tools diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml index 9e724e4..5962285 100644 --- a/durabletask-azuremanaged/pyproject.toml +++ b/durabletask-azuremanaged/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask.azuremanaged" -version = "0.1.3" +version = "0.1.4" description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" keywords = [ "durable", From c9704b39de0d41f71853c4a0764bfa161cd9c871 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Tue, 3 Jun 2025 10:21:21 -0700 Subject: [PATCH 25/81] Fix and improve connection handling, add concurrency options, prep for release (#50) * Reconnect upon connection error 
* concurrency * Test updates * More updates * more concurrency stuff * final touches * fix import * update log level * fix exports * more fixup * test updateS * more test imports * fix github workflow pytest * cleanup tests * Python 3.9 specific test fix * fixup reconnection for new concurrency model * autopep8 * Remove existing duplicate import Signed-off-by: Albert Callarisa --- .github/workflows/pr-validation.yml | 3 +- CHANGELOG.md | 14 +- .../durabletask/azuremanaged/worker.py | 53 +- durabletask-azuremanaged/pyproject.toml | 6 +- durabletask/__init__.py | 3 + durabletask/worker.py | 1037 ++++++++++++++--- examples/README.md | 2 +- pyproject.toml | 2 +- tests/durabletask/test_client.py | 7 +- tests/durabletask/test_concurrency_options.py | 96 ++ .../test_worker_concurrency_loop.py | 140 +++ .../test_worker_concurrency_loop_async.py | 80 ++ 12 files changed, 1241 insertions(+), 202 deletions(-) create mode 100644 tests/durabletask/test_concurrency_options.py create mode 100644 tests/durabletask/test_worker_concurrency_loop.py create mode 100644 tests/durabletask/test_worker_concurrency_loop_async.py diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index dddcc53..1d14d83 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -25,11 +25,12 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install durabletask dependencies + - name: Install durabletask dependencies and the library itself in editable mode run: | python -m pip install --upgrade pip pip install flake8 pytest pip install -r requirements.txt + pip install -e . - name: Install durabletask-azuremanaged dependencies working-directory: examples/dts run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index 13b0e69..6921faa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,13 +5,23 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## v0.2.0 (Unreleased) +## v0.3.0 + +### New + +- Added `ConcurrencyOptions` class for fine-grained concurrency control with separate limits for activities and orchestrations. The thread pool worker count can also be configured. + +### Fixed + +- Fixed an issue where a worker could not recover after its connection was interrupted or severed + +## v0.2.1 ### New - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) -- Added new `durabletask-azuremanaged` package for use with the [Durable Task Scheduler](https://techcommunity.microsoft.com/blog/appsonazureblog/announcing-limited-early-access-of-the-durable-task-scheduler-for-azure-durable-/4286526) - by [@RyanLettieri](https://github.com/RyanLettieri) +- Added new `durabletask-azuremanaged` package for use with the [Durable Task Scheduler](https://learn.microsoft.com/azure/azure-functions/durable/durable-task-scheduler/durable-task-scheduler) - by [@RyanLettieri](https://github.com/RyanLettieri) ### Changes diff --git a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py index fd3b1e4..1135ae7 100644 --- a/durabletask-azuremanaged/durabletask/azuremanaged/worker.py +++ b/durabletask-azuremanaged/durabletask/azuremanaged/worker.py @@ -5,19 +5,59 @@ from azure.core.credentials import TokenCredential -from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( - DTSDefaultClientInterceptorImpl, -) -from durabletask.worker import TaskHubGrpcWorker +from 
durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ + DTSDefaultClientInterceptorImpl +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker # Worker class used for Durable Task Scheduler (DTS) class DurableTaskSchedulerWorker(TaskHubGrpcWorker): + """A worker implementation for Azure Durable Task Scheduler (DTS). + + This class extends TaskHubGrpcWorker to provide integration with Azure's + Durable Task Scheduler service. It handles authentication via Azure credentials + and configures the necessary gRPC interceptors for DTS communication. + + Args: + host_address (str): The gRPC endpoint address of the DTS service. + taskhub (str): The name of the task hub. Cannot be empty. + token_credential (Optional[TokenCredential]): Azure credential for authentication. + If None, anonymous authentication will be used. + secure_channel (bool, optional): Whether to use a secure gRPC channel (TLS). + Defaults to True. + concurrency_options (Optional[ConcurrencyOptions], optional): Configuration + for controlling worker concurrency limits. If None, default concurrency + settings will be used. + + Raises: + ValueError: If taskhub is empty or None. + + Example: + >>> from azure.identity import DefaultAzureCredential + >>> from durabletask.azuremanaged import DurableTaskSchedulerWorker + >>> from durabletask.worker import ConcurrencyOptions + >>> + >>> credential = DefaultAzureCredential() + >>> concurrency = ConcurrencyOptions(max_concurrent_activities=10) + >>> worker = DurableTaskSchedulerWorker( + ... host_address="my-dts-service.azure.com:443", + ... taskhub="my-task-hub", + ... token_credential=credential, + ... concurrency_options=concurrency + ... ) + + Note: + This worker automatically configures DTS-specific gRPC interceptors + for authentication and task hub routing. The parent class metadata + parameter is set to None since authentication is handled by the + DTS interceptor. 
+ """ def __init__(self, *, host_address: str, taskhub: str, token_credential: Optional[TokenCredential], - secure_channel: bool = True): + secure_channel: bool = True, + concurrency_options: Optional[ConcurrencyOptions] = None): if not taskhub: raise ValueError("The taskhub value cannot be empty.") @@ -30,4 +70,5 @@ def __init__(self, *, host_address=host_address, secure_channel=secure_channel, metadata=None, - interceptors=interceptors) + interceptors=interceptors, + concurrency_options=concurrency_options) diff --git a/durabletask-azuremanaged/pyproject.toml b/durabletask-azuremanaged/pyproject.toml index 5962285..250cfcc 100644 --- a/durabletask-azuremanaged/pyproject.toml +++ b/durabletask-azuremanaged/pyproject.toml @@ -9,8 +9,8 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask.azuremanaged" -version = "0.1.4" -description = "Extensions for the Durable Task Python SDK for integrating with the Durable Task Scheduler in Azure" +version = "0.2.0" +description = "Durable Task Python SDK provider implementation for the Azure Durable Task Scheduler" keywords = [ "durable", "task", @@ -26,7 +26,7 @@ requires-python = ">=3.9" license = {file = "LICENSE"} readme = "README.md" dependencies = [ - "durabletask>=0.2.1", + "durabletask>=0.3.0", "azure-identity>=1.19.0" ] diff --git a/durabletask/__init__.py b/durabletask/__init__.py index a37823c..88af82b 100644 --- a/durabletask/__init__.py +++ b/durabletask/__init__.py @@ -3,5 +3,8 @@ """Durable Task SDK for Python""" +from durabletask.worker import ConcurrencyOptions + +__all__ = ["ConcurrencyOptions"] PACKAGE_NAME = "durabletask" diff --git a/durabletask/worker.py b/durabletask/worker.py index 2c31e52..b433a83 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -1,8 +1,12 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-import concurrent.futures +import asyncio +import inspect import logging +import os +import random +from concurrent.futures import ThreadPoolExecutor from datetime import datetime, timedelta from threading import Event, Thread from types import GeneratorType @@ -12,19 +16,63 @@ from google.protobuf import empty_pb2 import durabletask.internal.helpers as ph -import durabletask.internal.helpers as pbh import durabletask.internal.orchestrator_service_pb2 as pb import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared from durabletask import task from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl -TInput = TypeVar('TInput') -TOutput = TypeVar('TOutput') +TInput = TypeVar("TInput") +TOutput = TypeVar("TOutput") + + +class ConcurrencyOptions: + """Configuration options for controlling concurrency of different work item types and the thread pool size. + + This class provides fine-grained control over concurrent processing limits for + activities, orchestrations and the thread pool size. + """ + + def __init__( + self, + maximum_concurrent_activity_work_items: Optional[int] = None, + maximum_concurrent_orchestration_work_items: Optional[int] = None, + maximum_thread_pool_workers: Optional[int] = None, + ): + """Initialize concurrency options. + + Args: + maximum_concurrent_activity_work_items: Maximum number of activity work items + that can be processed concurrently. Defaults to 100 * processor_count. + maximum_concurrent_orchestration_work_items: Maximum number of orchestration work items + that can be processed concurrently. Defaults to 100 * processor_count. + maximum_thread_pool_workers: Maximum number of thread pool workers to use. 
+ """ + processor_count = os.cpu_count() or 1 + default_concurrency = 100 * processor_count + # see https://docs.python.org/3/library/concurrent.futures.html + default_max_workers = processor_count + 4 + + self.maximum_concurrent_activity_work_items = ( + maximum_concurrent_activity_work_items + if maximum_concurrent_activity_work_items is not None + else default_concurrency + ) + self.maximum_concurrent_orchestration_work_items = ( + maximum_concurrent_orchestration_work_items + if maximum_concurrent_orchestration_work_items is not None + else default_concurrency + ) + + self.maximum_thread_pool_workers = ( + maximum_thread_pool_workers + if maximum_thread_pool_workers is not None + else default_max_workers + ) -class _Registry: +class _Registry: orchestrators: dict[str, task.Orchestrator] activities: dict[str, task.Activity] @@ -34,7 +82,7 @@ def __init__(self): def add_orchestrator(self, fn: task.Orchestrator) -> str: if fn is None: - raise ValueError('An orchestrator function argument is required.') + raise ValueError("An orchestrator function argument is required.") name = task.get_name(fn) self.add_named_orchestrator(name, fn) @@ -42,7 +90,7 @@ def add_orchestrator(self, fn: task.Orchestrator) -> str: def add_named_orchestrator(self, name: str, fn: task.Orchestrator) -> None: if not name: - raise ValueError('A non-empty orchestrator name is required.') + raise ValueError("A non-empty orchestrator name is required.") if name in self.orchestrators: raise ValueError(f"A '{name}' orchestrator already exists.") @@ -53,7 +101,7 @@ def get_orchestrator(self, name: str) -> Optional[task.Orchestrator]: def add_activity(self, fn: task.Activity) -> str: if fn is None: - raise ValueError('An activity function argument is required.') + raise ValueError("An activity function argument is required.") name = task.get_name(fn) self.add_named_activity(name, fn) @@ -61,7 +109,7 @@ def add_activity(self, fn: task.Activity) -> str: def add_named_activity(self, name: str, fn: 
task.Activity) -> None: if not name: - raise ValueError('A non-empty activity name is required.') + raise ValueError("A non-empty activity name is required.") if name in self.activities: raise ValueError(f"A '{name}' activity already exists.") @@ -73,32 +121,125 @@ def get_activity(self, name: str) -> Optional[task.Activity]: class OrchestratorNotRegisteredError(ValueError): """Raised when attempting to start an orchestration that is not registered""" + pass class ActivityNotRegisteredError(ValueError): """Raised when attempting to call an activity that is not registered""" + pass class TaskHubGrpcWorker: + """A gRPC-based worker for processing durable task orchestrations and activities. + + This worker connects to a Durable Task backend service via gRPC to receive and process + work items including orchestration functions and activity functions. It provides + concurrent execution capabilities with configurable limits and automatic retry handling. + + The worker manages the complete lifecycle: + - Registers orchestrator and activity functions + - Connects to the gRPC backend service + - Receives work items and executes them concurrently + - Handles failures, retries, and state management + - Provides logging and monitoring capabilities + + Args: + host_address (Optional[str], optional): The gRPC endpoint address of the backend service. + Defaults to the value from environment variables or localhost. + metadata (Optional[list[tuple[str, str]]], optional): gRPC metadata to include with + requests. Used for authentication and routing. Defaults to None. + log_handler (optional): Custom logging handler for worker logs. Defaults to None. + log_formatter (Optional[logging.Formatter], optional): Custom log formatter. + Defaults to None. + secure_channel (bool, optional): Whether to use a secure gRPC channel (TLS). + Defaults to False. + interceptors (Optional[Sequence[shared.ClientInterceptor]], optional): Custom gRPC + interceptors to apply to the channel. 
Defaults to None. + concurrency_options (Optional[ConcurrencyOptions], optional): Configuration for + controlling worker concurrency limits. If None, default settings are used. + + Attributes: + concurrency_options (ConcurrencyOptions): The current concurrency configuration. + + Example: + Basic worker setup: + + >>> from durabletask.worker import TaskHubGrpcWorker, ConcurrencyOptions + >>> + >>> # Create worker with custom concurrency settings + >>> concurrency = ConcurrencyOptions( + ... maximum_concurrent_activity_work_items=50, + ... maximum_concurrent_orchestration_work_items=20 + ... ) + >>> worker = TaskHubGrpcWorker( + ... host_address="localhost:4001", + ... concurrency_options=concurrency + ... ) + >>> + >>> # Register functions + >>> @worker.add_orchestrator + ... def my_orchestrator(context, input): + ... result = yield context.call_activity("my_activity", input="hello") + ... return result + >>> + >>> @worker.add_activity + ... def my_activity(context, input): + ... return f"Processed: {input}" + >>> + >>> # Start the worker + >>> worker.start() + >>> # ... worker runs in background thread + >>> worker.stop() + + Using as context manager: + + >>> with TaskHubGrpcWorker() as worker: + ... worker.add_orchestrator(my_orchestrator) + ... worker.add_activity(my_activity) + ... worker.start() + ... # Worker automatically stops when exiting context + + Raises: + RuntimeError: If attempting to add orchestrators/activities while the worker is running, + or if starting a worker that is already running. + OrchestratorNotRegisteredError: If an orchestration work item references an + unregistered orchestrator function. + ActivityNotRegisteredError: If an activity work item references an unregistered + activity function. 
+ """ + _response_stream: Optional[grpc.Future] = None _interceptors: Optional[list[shared.ClientInterceptor]] = None - def __init__(self, *, - host_address: Optional[str] = None, - metadata: Optional[list[tuple[str, str]]] = None, - log_handler=None, - log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False, - interceptors: Optional[Sequence[shared.ClientInterceptor]] = None): + def __init__( + self, + *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler=None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, + concurrency_options: Optional[ConcurrencyOptions] = None, + ): self._registry = _Registry() - self._host_address = host_address if host_address else shared.get_default_host_address() + self._host_address = ( + host_address if host_address else shared.get_default_host_address() + ) self._logger = shared.get_logger("worker", log_handler, log_formatter) self._shutdown = Event() self._is_running = False self._secure_channel = secure_channel + # Use provided concurrency options or create default ones + self._concurrency_options = ( + concurrency_options + if concurrency_options is not None + else ConcurrencyOptions() + ) + # Determine the interceptors to use if interceptors is not None: self._interceptors = list(interceptors) @@ -109,6 +250,13 @@ def __init__(self, *, else: self._interceptors = None + self._async_worker_manager = _AsyncWorkerManager(self._concurrency_options) + + @property + def concurrency_options(self) -> ConcurrencyOptions: + """Get the current concurrency options for this worker.""" + return self._concurrency_options + def __enter__(self): return self @@ -118,72 +266,223 @@ def __exit__(self, type, value, traceback): def add_orchestrator(self, fn: task.Orchestrator) -> str: """Registers an orchestrator function with the worker.""" if self._is_running: - raise 
RuntimeError('Orchestrators cannot be added while the worker is running.') + raise RuntimeError( + "Orchestrators cannot be added while the worker is running." + ) return self._registry.add_orchestrator(fn) def add_activity(self, fn: task.Activity) -> str: """Registers an activity function with the worker.""" if self._is_running: - raise RuntimeError('Activities cannot be added while the worker is running.') + raise RuntimeError( + "Activities cannot be added while the worker is running." + ) return self._registry.add_activity(fn) def start(self): """Starts the worker on a background thread and begins listening for work items.""" - channel = shared.get_grpc_channel(self._host_address, self._secure_channel, self._interceptors) - stub = stubs.TaskHubSidecarServiceStub(channel) - if self._is_running: - raise RuntimeError('The worker is already running.') + raise RuntimeError("The worker is already running.") def run_loop(): - # TODO: Investigate whether asyncio could be used to enable greater concurrency for async activity - # functions. We'd need to know ahead of time whether a function is async or not. - # TODO: Max concurrency configuration settings - with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor: - while not self._shutdown.is_set(): - try: - # send a "Hello" message to the sidecar to ensure that it's listening - stub.Hello(empty_pb2.Empty()) - - # stream work items - self._response_stream = stub.GetWorkItems(pb.GetWorkItemsRequest()) - self._logger.info(f'Successfully connected to {self._host_address}. Waiting for work items...') - - # The stream blocks until either a work item is received or the stream is canceled - # by another thread (see the stop() method). 
- for work_item in self._response_stream: # type: ignore - request_type = work_item.WhichOneof('request') - self._logger.debug(f'Received "{request_type}" work item') - if work_item.HasField('orchestratorRequest'): - executor.submit(self._execute_orchestrator, work_item.orchestratorRequest, stub, work_item.completionToken) - elif work_item.HasField('activityRequest'): - executor.submit(self._execute_activity, work_item.activityRequest, stub, work_item.completionToken) - elif work_item.HasField('healthPing'): - pass # no-op - else: - self._logger.warning(f'Unexpected work item type: {request_type}') - - except grpc.RpcError as rpc_error: - if rpc_error.code() == grpc.StatusCode.CANCELLED: # type: ignore - self._logger.info(f'Disconnected from {self._host_address}') - elif rpc_error.code() == grpc.StatusCode.UNAVAILABLE: # type: ignore - self._logger.warning( - f'The sidecar at address {self._host_address} is unavailable - will continue retrying') - else: - self._logger.warning(f'Unexpected error: {rpc_error}') - except Exception as ex: - self._logger.warning(f'Unexpected error: {ex}') - - # CONSIDER: exponential backoff - self._shutdown.wait(5) - self._logger.info("No longer listening for work items") - return + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + loop.run_until_complete(self._async_run_loop()) self._logger.info(f"Starting gRPC worker that connects to {self._host_address}") self._runLoop = Thread(target=run_loop) self._runLoop.start() self._is_running = True + async def _async_run_loop(self): + worker_task = asyncio.create_task(self._async_worker_manager.run()) + # Connection state management for retry fix + current_channel = None + current_stub = None + current_reader_thread = None + conn_retry_count = 0 + conn_max_retry_delay = 60 + + def create_fresh_connection(): + nonlocal current_channel, current_stub, conn_retry_count + if current_channel: + try: + current_channel.close() + except Exception: + pass + current_channel = None + 
current_stub = None + try: + current_channel = shared.get_grpc_channel( + self._host_address, self._secure_channel, self._interceptors + ) + current_stub = stubs.TaskHubSidecarServiceStub(current_channel) + current_stub.Hello(empty_pb2.Empty()) + conn_retry_count = 0 + self._logger.info(f"Created fresh connection to {self._host_address}") + except Exception as e: + self._logger.warning(f"Failed to create connection: {e}") + current_channel = None + current_stub = None + raise + + def invalidate_connection(): + nonlocal current_channel, current_stub, current_reader_thread + # Cancel the response stream first to signal the reader thread to stop + if self._response_stream is not None: + try: + self._response_stream.cancel() + except Exception: + pass + self._response_stream = None + + # Wait for the reader thread to finish + if current_reader_thread is not None: + try: + current_reader_thread.join(timeout=2) + if current_reader_thread.is_alive(): + self._logger.warning("Stream reader thread did not shut down gracefully") + except Exception: + pass + current_reader_thread = None + + # Close the channel + if current_channel: + try: + current_channel.close() + except Exception: + pass + current_channel = None + current_stub = None + + def should_invalidate_connection(rpc_error): + error_code = rpc_error.code() # type: ignore + connection_level_errors = { + grpc.StatusCode.UNAVAILABLE, + grpc.StatusCode.DEADLINE_EXCEEDED, + grpc.StatusCode.CANCELLED, + grpc.StatusCode.UNAUTHENTICATED, + grpc.StatusCode.ABORTED, + } + return error_code in connection_level_errors + + while not self._shutdown.is_set(): + if current_stub is None: + try: + create_fresh_connection() + except Exception: + conn_retry_count += 1 + delay = min( + conn_max_retry_delay, + (2 ** min(conn_retry_count, 6)) + random.uniform(0, 1), + ) + self._logger.warning( + f"Connection failed, retrying in {delay:.2f} seconds (attempt {conn_retry_count})" + ) + if self._shutdown.wait(delay): + break + continue + try: 
+ assert current_stub is not None + stub = current_stub + get_work_items_request = pb.GetWorkItemsRequest( + maxConcurrentOrchestrationWorkItems=self._concurrency_options.maximum_concurrent_orchestration_work_items, + maxConcurrentActivityWorkItems=self._concurrency_options.maximum_concurrent_activity_work_items, + ) + self._response_stream = stub.GetWorkItems(get_work_items_request) + self._logger.info( + f"Successfully connected to {self._host_address}. Waiting for work items..." + ) + + # Use a thread to read from the blocking gRPC stream and forward to asyncio + import queue + + work_item_queue = queue.Queue() + + def stream_reader(): + try: + for work_item in self._response_stream: + work_item_queue.put(work_item) + except Exception as e: + work_item_queue.put(e) + + import threading + + current_reader_thread = threading.Thread(target=stream_reader, daemon=True) + current_reader_thread.start() + loop = asyncio.get_running_loop() + while not self._shutdown.is_set(): + try: + work_item = await loop.run_in_executor( + None, work_item_queue.get + ) + if isinstance(work_item, Exception): + raise work_item + request_type = work_item.WhichOneof("request") + self._logger.debug(f'Received "{request_type}" work item') + if work_item.HasField("orchestratorRequest"): + self._async_worker_manager.submit_orchestration( + self._execute_orchestrator, + work_item.orchestratorRequest, + stub, + work_item.completionToken, + ) + elif work_item.HasField("activityRequest"): + self._async_worker_manager.submit_activity( + self._execute_activity, + work_item.activityRequest, + stub, + work_item.completionToken, + ) + elif work_item.HasField("healthPing"): + pass + else: + self._logger.warning( + f"Unexpected work item type: {request_type}" + ) + except Exception as e: + self._logger.warning(f"Error in work item stream: {e}") + raise e + current_reader_thread.join(timeout=1) + self._logger.info("Work item stream ended normally") + except grpc.RpcError as rpc_error: + should_invalidate 
= should_invalidate_connection(rpc_error) + if should_invalidate: + invalidate_connection() + error_code = rpc_error.code() # type: ignore + error_details = str(rpc_error) + + if error_code == grpc.StatusCode.CANCELLED: + self._logger.info(f"Disconnected from {self._host_address}") + break + elif error_code == grpc.StatusCode.UNAVAILABLE: + # Check if this is a connection timeout scenario + if "Timeout occurred" in error_details or "Failed to connect to remote host" in error_details: + self._logger.warning( + f"Connection timeout to {self._host_address}: {error_details} - will retry with fresh connection" + ) + else: + self._logger.warning( + f"The sidecar at address {self._host_address} is unavailable: {error_details} - will continue retrying" + ) + elif should_invalidate: + self._logger.warning( + f"Connection-level gRPC error ({error_code}): {rpc_error} - resetting connection" + ) + else: + self._logger.warning( + f"Application-level gRPC error ({error_code}): {rpc_error}" + ) + self._shutdown.wait(1) + except Exception as ex: + invalidate_connection() + self._logger.warning(f"Unexpected error: {ex}") + self._shutdown.wait(1) + invalidate_connection() + self._logger.info("No longer listening for work items") + self._async_worker_manager.shutdown() + await worker_task + def stop(self): """Stops the worker and waits for any pending work items to complete.""" if not self._is_running: @@ -195,51 +494,80 @@ def stop(self): self._response_stream.cancel() if self._runLoop is not None: self._runLoop.join(timeout=30) + self._async_worker_manager.shutdown() self._logger.info("Worker shutdown completed") self._is_running = False - def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken): + def _execute_orchestrator( + self, + req: pb.OrchestratorRequest, + stub: stubs.TaskHubSidecarServiceStub, + completionToken, + ): try: executor = _OrchestrationExecutor(self._registry, self._logger) result = 
executor.execute(req.instanceId, req.pastEvents, req.newEvents) res = pb.OrchestratorResponse( instanceId=req.instanceId, actions=result.actions, - customStatus=pbh.get_string_value(result.encoded_custom_status), - completionToken=completionToken) + customStatus=ph.get_string_value(result.encoded_custom_status), + completionToken=completionToken, + ) except Exception as ex: - self._logger.exception(f"An error occurred while trying to execute instance '{req.instanceId}': {ex}") - failure_details = pbh.new_failure_details(ex) - actions = [pbh.new_complete_orchestration_action(-1, pb.ORCHESTRATION_STATUS_FAILED, "", failure_details)] - res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=actions, completionToken=completionToken) + self._logger.exception( + f"An error occurred while trying to execute instance '{req.instanceId}': {ex}" + ) + failure_details = ph.new_failure_details(ex) + actions = [ + ph.new_complete_orchestration_action( + -1, pb.ORCHESTRATION_STATUS_FAILED, "", failure_details + ) + ] + res = pb.OrchestratorResponse( + instanceId=req.instanceId, + actions=actions, + completionToken=completionToken, + ) try: stub.CompleteOrchestratorTask(res) except Exception as ex: - self._logger.exception(f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}") - - def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken): + self._logger.exception( + f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}" + ) + + def _execute_activity( + self, + req: pb.ActivityRequest, + stub: stubs.TaskHubSidecarServiceStub, + completionToken, + ): instance_id = req.orchestrationInstance.instanceId try: executor = _ActivityExecutor(self._registry, self._logger) - result = executor.execute(instance_id, req.name, req.taskId, req.input.value) + result = executor.execute( + instance_id, req.name, req.taskId, req.input.value + ) res = pb.ActivityResponse( 
instanceId=instance_id, taskId=req.taskId, - result=pbh.get_string_value(result), - completionToken=completionToken) + result=ph.get_string_value(result), + completionToken=completionToken, + ) except Exception as ex: res = pb.ActivityResponse( instanceId=instance_id, taskId=req.taskId, - failureDetails=pbh.new_failure_details(ex), - completionToken=completionToken) + failureDetails=ph.new_failure_details(ex), + completionToken=completionToken, + ) try: stub.CompleteActivityTask(res) except Exception as ex: self._logger.exception( - f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}") + f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}" + ) class _RuntimeOrchestrationContext(task.OrchestrationContext): @@ -273,7 +601,9 @@ def run(self, generator: Generator[task.Task, Any, Any]): def resume(self): if self._generator is None: # This is never expected unless maybe there's an issue with the history - raise TypeError("The orchestrator generator is not initialized! Was the orchestration history corrupted?") + raise TypeError( + "The orchestrator generator is not initialized! Was the orchestration history corrupted?" + ) # We can resume the generator only if the previously yielded task # has reached a completed state. 
The only time this won't be the @@ -294,7 +624,12 @@ def resume(self): raise TypeError("The orchestrator generator yielded a non-Task object") self._previous_task = next_task - def set_complete(self, result: Any, status: pb.OrchestrationStatus, is_result_encoded: bool = False): + def set_complete( + self, + result: Any, + status: pb.OrchestrationStatus, + is_result_encoded: bool = False, + ): if self._is_complete: return @@ -307,7 +642,8 @@ def set_complete(self, result: Any, status: pb.OrchestrationStatus, is_result_en if result is not None: result_json = result if is_result_encoded else shared.to_json(result) action = ph.new_complete_orchestration_action( - self.next_sequence_number(), status, result_json) + self.next_sequence_number(), status, result_json + ) self._pending_actions[action.id] = action def set_failed(self, ex: Exception): @@ -319,7 +655,10 @@ def set_failed(self, ex: Exception): self._completion_status = pb.ORCHESTRATION_STATUS_FAILED action = ph.new_complete_orchestration_action( - self.next_sequence_number(), pb.ORCHESTRATION_STATUS_FAILED, None, ph.new_failure_details(ex) + self.next_sequence_number(), + pb.ORCHESTRATION_STATUS_FAILED, + None, + ph.new_failure_details(ex), ) self._pending_actions[action.id] = action @@ -343,14 +682,21 @@ def get_actions(self) -> list[pb.OrchestratorAction]: # replayed when the new instance starts. 
for event_name, values in self._received_events.items(): for event_value in values: - encoded_value = shared.to_json(event_value) if event_value else None - carryover_events.append(ph.new_event_raised_event(event_name, encoded_value)) + encoded_value = ( + shared.to_json(event_value) if event_value else None + ) + carryover_events.append( + ph.new_event_raised_event(event_name, encoded_value) + ) action = ph.new_complete_orchestration_action( self.next_sequence_number(), pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW, - result=shared.to_json(self._new_input) if self._new_input is not None else None, + result=shared.to_json(self._new_input) + if self._new_input is not None + else None, failure_details=None, - carryover_events=carryover_events) + carryover_events=carryover_events, + ) return [action] else: return list(self._pending_actions.values()) @@ -367,60 +713,84 @@ def instance_id(self) -> str: def current_utc_datetime(self) -> datetime: return self._current_utc_datetime - @property - def is_replaying(self) -> bool: - return self._is_replaying - @current_utc_datetime.setter def current_utc_datetime(self, value: datetime): self._current_utc_datetime = value + @property + def is_replaying(self) -> bool: + return self._is_replaying + def set_custom_status(self, custom_status: Any) -> None: - self._encoded_custom_status = shared.to_json(custom_status) if custom_status is not None else None + self._encoded_custom_status = ( + shared.to_json(custom_status) if custom_status is not None else None + ) def create_timer(self, fire_at: Union[datetime, timedelta]) -> task.Task: return self.create_timer_internal(fire_at) - def create_timer_internal(self, fire_at: Union[datetime, timedelta], - retryable_task: Optional[task.RetryableTask] = None) -> task.Task: + def create_timer_internal( + self, + fire_at: Union[datetime, timedelta], + retryable_task: Optional[task.RetryableTask] = None, + ) -> task.Task: id = self.next_sequence_number() if isinstance(fire_at, timedelta): fire_at = 
self.current_utc_datetime + fire_at action = ph.new_create_timer_action(id, fire_at) self._pending_actions[id] = action - timer_task = task.TimerTask() + timer_task: task.TimerTask = task.TimerTask() if retryable_task is not None: timer_task.set_retryable_parent(retryable_task) self._pending_tasks[id] = timer_task return timer_task - def call_activity(self, activity: Union[task.Activity[TInput, TOutput], str], *, - input: Optional[TInput] = None, - retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]: + def call_activity( + self, + activity: Union[task.Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[task.RetryPolicy] = None, + ) -> task.Task[TOutput]: id = self.next_sequence_number() - self.call_activity_function_helper(id, activity, input=input, retry_policy=retry_policy, - is_sub_orch=False) + self.call_activity_function_helper( + id, activity, input=input, retry_policy=retry_policy, is_sub_orch=False + ) return self._pending_tasks.get(id, task.CompletableTask()) - def call_sub_orchestrator(self, orchestrator: task.Orchestrator[TInput, TOutput], *, - input: Optional[TInput] = None, - instance_id: Optional[str] = None, - retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]: + def call_sub_orchestrator( + self, + orchestrator: task.Orchestrator[TInput, TOutput], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + retry_policy: Optional[task.RetryPolicy] = None, + ) -> task.Task[TOutput]: id = self.next_sequence_number() orchestrator_name = task.get_name(orchestrator) - self.call_activity_function_helper(id, orchestrator_name, input=input, retry_policy=retry_policy, - is_sub_orch=True, instance_id=instance_id) + self.call_activity_function_helper( + id, + orchestrator_name, + input=input, + retry_policy=retry_policy, + is_sub_orch=True, + instance_id=instance_id, + ) return self._pending_tasks.get(id, task.CompletableTask()) - def 
call_activity_function_helper(self, id: Optional[int], - activity_function: Union[task.Activity[TInput, TOutput], str], *, - input: Optional[TInput] = None, - retry_policy: Optional[task.RetryPolicy] = None, - is_sub_orch: bool = False, - instance_id: Optional[str] = None, - fn_task: Optional[task.CompletableTask[TOutput]] = None): + def call_activity_function_helper( + self, + id: Optional[int], + activity_function: Union[task.Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[task.RetryPolicy] = None, + is_sub_orch: bool = False, + instance_id: Optional[str] = None, + fn_task: Optional[task.CompletableTask[TOutput]] = None, + ): if id is None: id = self.next_sequence_number() @@ -431,7 +801,11 @@ def call_activity_function_helper(self, id: Optional[int], # We just need to take string representation of it. encoded_input = str(input) if not is_sub_orch: - name = activity_function if isinstance(activity_function, str) else task.get_name(activity_function) + name = ( + activity_function + if isinstance(activity_function, str) + else task.get_name(activity_function) + ) action = ph.new_schedule_task_action(id, name, encoded_input) else: if instance_id is None: @@ -439,16 +813,21 @@ def call_activity_function_helper(self, id: Optional[int], instance_id = f"{self.instance_id}:{id:04x}" if not isinstance(activity_function, str): raise ValueError("Orchestrator function name must be a string") - action = ph.new_create_sub_orchestration_action(id, activity_function, instance_id, encoded_input) + action = ph.new_create_sub_orchestration_action( + id, activity_function, instance_id, encoded_input + ) self._pending_actions[id] = action if fn_task is None: if retry_policy is None: fn_task = task.CompletableTask[TOutput]() else: - fn_task = task.RetryableTask[TOutput](retry_policy=retry_policy, action=action, - start_time=self.current_utc_datetime, - is_sub_orch=is_sub_orch) + fn_task = task.RetryableTask[TOutput]( + 
retry_policy=retry_policy, + action=action, + start_time=self.current_utc_datetime, + is_sub_orch=is_sub_orch, + ) self._pending_tasks[id] = fn_task def wait_for_external_event(self, name: str) -> task.Task: @@ -457,7 +836,7 @@ def wait_for_external_event(self, name: str) -> task.Task: # event with the given name so that we can resume the generator when it # arrives. If there are multiple events with the same name, we return # them in the order they were received. - external_event_task = task.CompletableTask() + external_event_task: task.CompletableTask = task.CompletableTask() event_name = name.casefold() event_list = self._received_events.get(event_name, None) if event_list: @@ -484,7 +863,9 @@ class ExecutionResults: actions: list[pb.OrchestratorAction] encoded_custom_status: Optional[str] - def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str]): + def __init__( + self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str] + ): self.actions = actions self.encoded_custom_status = encoded_custom_status @@ -498,14 +879,23 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._is_suspended = False self._suspended_events: list[pb.HistoryEvent] = [] - def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_events: Sequence[pb.HistoryEvent]) -> ExecutionResults: + def execute( + self, + instance_id: str, + old_events: Sequence[pb.HistoryEvent], + new_events: Sequence[pb.HistoryEvent], + ) -> ExecutionResults: if not new_events: - raise task.OrchestrationStateError("The new history event list must have at least one event in it.") + raise task.OrchestrationStateError( + "The new history event list must have at least one event in it." 
+ ) ctx = _RuntimeOrchestrationContext(instance_id) try: # Rebuild local state by replaying old history into the orchestrator function - self._logger.debug(f"{instance_id}: Rebuilding local state with {len(old_events)} history event...") + self._logger.debug( + f"{instance_id}: Rebuilding local state with {len(old_events)} history event..." + ) ctx._is_replaying = True for old_event in old_events: self.process_event(ctx, old_event) @@ -513,7 +903,9 @@ def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_e # Get new actions by executing newly received events into the orchestrator function if self._logger.level <= logging.DEBUG: summary = _get_new_event_summary(new_events) - self._logger.debug(f"{instance_id}: Processing {len(new_events)} new event(s): {summary}") + self._logger.debug( + f"{instance_id}: Processing {len(new_events)} new event(s): {summary}" + ) ctx._is_replaying = False for new_event in new_events: self.process_event(ctx, new_event) @@ -525,17 +917,31 @@ def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_e if not ctx._is_complete: task_count = len(ctx._pending_tasks) event_count = len(ctx._pending_events) - self._logger.info(f"{instance_id}: Orchestrator yielded with {task_count} task(s) and {event_count} event(s) outstanding.") - elif ctx._completion_status and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW: - completion_status_str = pbh.get_orchestration_status_str(ctx._completion_status) - self._logger.info(f"{instance_id}: Orchestration completed with status: {completion_status_str}") + self._logger.info( + f"{instance_id}: Orchestrator yielded with {task_count} task(s) and {event_count} event(s) outstanding." 
+ ) + elif ( + ctx._completion_status and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW + ): + completion_status_str = ph.get_orchestration_status_str( + ctx._completion_status + ) + self._logger.info( + f"{instance_id}: Orchestration completed with status: {completion_status_str}" + ) actions = ctx.get_actions() if self._logger.level <= logging.DEBUG: - self._logger.debug(f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}") - return ExecutionResults(actions=actions, encoded_custom_status=ctx._encoded_custom_status) + self._logger.debug( + f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}" + ) + return ExecutionResults( + actions=actions, encoded_custom_status=ctx._encoded_custom_status + ) - def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent) -> None: + def process_event( + self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent + ) -> None: if self._is_suspended and _is_suspendable(event): # We are suspended, so we need to buffer this event until we are resumed self._suspended_events.append(event) @@ -550,14 +956,19 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven fn = self._registry.get_orchestrator(event.executionStarted.name) if fn is None: raise OrchestratorNotRegisteredError( - f"A '{event.executionStarted.name}' orchestrator was not registered.") + f"A '{event.executionStarted.name}' orchestrator was not registered." 
+ ) # deserialize the input, if any input = None - if event.executionStarted.input is not None and event.executionStarted.input.value != "": + if ( + event.executionStarted.input is not None and event.executionStarted.input.value != "" + ): input = shared.from_json(event.executionStarted.input.value) - result = fn(ctx, input) # this does not execute the generator, only creates it + result = fn( + ctx, input + ) # this does not execute the generator, only creates it if isinstance(result, GeneratorType): # Start the orchestrator's generator function ctx.run(result) @@ -570,10 +981,14 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven timer_id = event.eventId action = ctx._pending_actions.pop(timer_id, None) if not action: - raise _get_non_determinism_error(timer_id, task.get_name(ctx.create_timer)) + raise _get_non_determinism_error( + timer_id, task.get_name(ctx.create_timer) + ) elif not action.HasField("createTimer"): expected_method_name = task.get_name(ctx.create_timer) - raise _get_wrong_action_type_error(timer_id, expected_method_name, action) + raise _get_wrong_action_type_error( + timer_id, expected_method_name, action + ) elif event.HasField("timerFired"): timer_id = event.timerFired.timerId timer_task = ctx._pending_tasks.pop(timer_id, None) @@ -581,7 +996,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx._is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected timerFired event with ID = {timer_id}.") + f"{ctx.instance_id}: Ignoring unexpected timerFired event with ID = {timer_id}." 
+ ) return timer_task.complete(None) if timer_task._retryable_parent is not None: @@ -593,12 +1009,15 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven else: cur_task = activity_action.createSubOrchestration instance_id = cur_task.instanceId - ctx.call_activity_function_helper(id=activity_action.id, activity_function=cur_task.name, - input=cur_task.input.value, - retry_policy=timer_task._retryable_parent._retry_policy, - is_sub_orch=timer_task._retryable_parent._is_sub_orch, - instance_id=instance_id, - fn_task=timer_task._retryable_parent) + ctx.call_activity_function_helper( + id=activity_action.id, + activity_function=cur_task.name, + input=cur_task.input.value, + retry_policy=timer_task._retryable_parent._retry_policy, + is_sub_orch=timer_task._retryable_parent._is_sub_orch, + instance_id=instance_id, + fn_task=timer_task._retryable_parent, + ) else: ctx.resume() elif event.HasField("taskScheduled"): @@ -608,16 +1027,21 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven action = ctx._pending_actions.pop(task_id, None) activity_task = ctx._pending_tasks.get(task_id, None) if not action: - raise _get_non_determinism_error(task_id, task.get_name(ctx.call_activity)) + raise _get_non_determinism_error( + task_id, task.get_name(ctx.call_activity) + ) elif not action.HasField("scheduleTask"): expected_method_name = task.get_name(ctx.call_activity) - raise _get_wrong_action_type_error(task_id, expected_method_name, action) + raise _get_wrong_action_type_error( + task_id, expected_method_name, action + ) elif action.scheduleTask.name != event.taskScheduled.name: raise _get_wrong_action_name_error( task_id, method_name=task.get_name(ctx.call_activity), expected_task_name=event.taskScheduled.name, - actual_task_name=action.scheduleTask.name) + actual_task_name=action.scheduleTask.name, + ) elif event.HasField("taskCompleted"): # This history event contains the result of a completed activity task. 
task_id = event.taskCompleted.taskScheduledId @@ -626,7 +1050,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected taskCompleted event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected taskCompleted event with ID = {task_id}." + ) return result = None if not ph.is_empty(event.taskCompleted.result): @@ -640,7 +1065,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected taskFailed event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected taskFailed event with ID = {task_id}." + ) return if isinstance(activity_task, task.RetryableTask): @@ -649,7 +1075,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven if next_delay is None: activity_task.fail( f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", - event.taskFailed.failureDetails) + event.taskFailed.failureDetails, + ) ctx.resume() else: activity_task.increment_attempt_count() @@ -657,7 +1084,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven elif isinstance(activity_task, task.CompletableTask): activity_task.fail( f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", - event.taskFailed.failureDetails) + event.taskFailed.failureDetails, + ) ctx.resume() else: raise TypeError("Unexpected task type") @@ -667,16 +1095,23 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven task_id = event.eventId action = ctx._pending_actions.pop(task_id, None) if not action: - raise _get_non_determinism_error(task_id, 
task.get_name(ctx.call_sub_orchestrator)) + raise _get_non_determinism_error( + task_id, task.get_name(ctx.call_sub_orchestrator) + ) elif not action.HasField("createSubOrchestration"): expected_method_name = task.get_name(ctx.call_sub_orchestrator) - raise _get_wrong_action_type_error(task_id, expected_method_name, action) - elif action.createSubOrchestration.name != event.subOrchestrationInstanceCreated.name: + raise _get_wrong_action_type_error( + task_id, expected_method_name, action + ) + elif ( + action.createSubOrchestration.name != event.subOrchestrationInstanceCreated.name + ): raise _get_wrong_action_name_error( task_id, method_name=task.get_name(ctx.call_sub_orchestrator), expected_task_name=event.subOrchestrationInstanceCreated.name, - actual_task_name=action.createSubOrchestration.name) + actual_task_name=action.createSubOrchestration.name, + ) elif event.HasField("subOrchestrationInstanceCompleted"): task_id = event.subOrchestrationInstanceCompleted.taskScheduledId sub_orch_task = ctx._pending_tasks.pop(task_id, None) @@ -684,11 +1119,14 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceCompleted event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceCompleted event with ID = {task_id}." + ) return result = None if not ph.is_empty(event.subOrchestrationInstanceCompleted.result): - result = shared.from_json(event.subOrchestrationInstanceCompleted.result.value) + result = shared.from_json( + event.subOrchestrationInstanceCompleted.result.value + ) sub_orch_task.complete(result) ctx.resume() elif event.HasField("subOrchestrationInstanceFailed"): @@ -699,7 +1137,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? 
if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceFailed event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceFailed event with ID = {task_id}." + ) return if isinstance(sub_orch_task, task.RetryableTask): if sub_orch_task._retry_policy is not None: @@ -707,7 +1146,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven if next_delay is None: sub_orch_task.fail( f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", - failedEvent.failureDetails) + failedEvent.failureDetails, + ) ctx.resume() else: sub_orch_task.increment_attempt_count() @@ -715,7 +1155,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven elif isinstance(sub_orch_task, task.CompletableTask): sub_orch_task.fail( f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", - failedEvent.failureDetails) + failedEvent.failureDetails, + ) ctx.resume() else: raise TypeError("Unexpected sub-orchestration task type") @@ -744,7 +1185,9 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven decoded_result = shared.from_json(event.eventRaised.input.value) event_list.append(decoded_result) if not ctx.is_replaying: - self._logger.info(f"{ctx.instance_id}: Event '{event_name}' has been buffered as there are no tasks waiting for it.") + self._logger.info( + f"{ctx.instance_id}: Event '{event_name}' has been buffered as there are no tasks waiting for it." 
+ ) elif event.HasField("executionSuspended"): if not self._is_suspended and not ctx.is_replaying: self._logger.info(f"{ctx.instance_id}: Execution suspended.") @@ -759,11 +1202,21 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven elif event.HasField("executionTerminated"): if not ctx.is_replaying: self._logger.info(f"{ctx.instance_id}: Execution terminating.") - encoded_output = event.executionTerminated.input.value if not ph.is_empty(event.executionTerminated.input) else None - ctx.set_complete(encoded_output, pb.ORCHESTRATION_STATUS_TERMINATED, is_result_encoded=True) + encoded_output = ( + event.executionTerminated.input.value + if not ph.is_empty(event.executionTerminated.input) + else None + ) + ctx.set_complete( + encoded_output, + pb.ORCHESTRATION_STATUS_TERMINATED, + is_result_encoded=True, + ) else: eventType = event.WhichOneof("eventType") - raise task.OrchestrationStateError(f"Don't know how to handle event of type '{eventType}'") + raise task.OrchestrationStateError( + f"Don't know how to handle event of type '{eventType}'" + ) except StopIteration as generatorStopped: # The orchestrator generator function completed ctx.set_complete(generatorStopped.value, pb.ORCHESTRATION_STATUS_COMPLETED) @@ -774,12 +1227,22 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._registry = registry self._logger = logger - def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input: Optional[str]) -> Optional[str]: + def execute( + self, + orchestration_id: str, + name: str, + task_id: int, + encoded_input: Optional[str], + ) -> Optional[str]: """Executes an activity function and returns the serialized result, if any.""" - self._logger.debug(f"{orchestration_id}/{task_id}: Executing activity '{name}'...") + self._logger.debug( + f"{orchestration_id}/{task_id}: Executing activity '{name}'..." 
+ ) fn = self._registry.get_activity(name) if not fn: - raise ActivityNotRegisteredError(f"Activity function named '{name}' was not registered!") + raise ActivityNotRegisteredError( + f"Activity function named '{name}' was not registered!" + ) activity_input = shared.from_json(encoded_input) if encoded_input else None ctx = task.ActivityContext(orchestration_id, task_id) @@ -787,49 +1250,54 @@ def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input: # Execute the activity function activity_output = fn(ctx, activity_input) - encoded_output = shared.to_json(activity_output) if activity_output is not None else None + encoded_output = ( + shared.to_json(activity_output) if activity_output is not None else None + ) chars = len(encoded_output) if encoded_output else 0 self._logger.debug( - f"{orchestration_id}/{task_id}: Activity '{name}' completed successfully with {chars} char(s) of encoded output.") + f"{orchestration_id}/{task_id}: Activity '{name}' completed successfully with {chars} char(s) of encoded output." + ) return encoded_output -def _get_non_determinism_error(task_id: int, action_name: str) -> task.NonDeterminismError: +def _get_non_determinism_error( + task_id: int, action_name: str +) -> task.NonDeterminismError: return task.NonDeterminismError( f"A previous execution called {action_name} with ID={task_id}, but the current " f"execution doesn't have this action with this ID. This problem occurs when either " f"the orchestration has non-deterministic logic or if the code was changed after an " - f"instance of this orchestration already started running.") + f"instance of this orchestration already started running." 
+ ) def _get_wrong_action_type_error( - task_id: int, - expected_method_name: str, - action: pb.OrchestratorAction) -> task.NonDeterminismError: + task_id: int, expected_method_name: str, action: pb.OrchestratorAction +) -> task.NonDeterminismError: unexpected_method_name = _get_method_name_for_action(action) return task.NonDeterminismError( f"Failed to restore orchestration state due to a history mismatch: A previous execution called " f"{expected_method_name} with ID={task_id}, but the current execution is instead trying to call " f"{unexpected_method_name} as part of rebuilding it's history. This kind of mismatch can happen if an " f"orchestration has non-deterministic logic or if the code was changed after an instance of this " - f"orchestration already started running.") + f"orchestration already started running." + ) def _get_wrong_action_name_error( - task_id: int, - method_name: str, - expected_task_name: str, - actual_task_name: str) -> task.NonDeterminismError: + task_id: int, method_name: str, expected_task_name: str, actual_task_name: str +) -> task.NonDeterminismError: return task.NonDeterminismError( f"Failed to restore orchestration state due to a history mismatch: A previous execution called " f"{method_name} with name='{expected_task_name}' and sequence number {task_id}, but the current " f"execution is instead trying to call {actual_task_name} as part of rebuilding it's history. " f"This kind of mismatch can happen if an orchestration has non-deterministic logic or if the code " - f"was changed after an instance of this orchestration already started running.") + f"was changed after an instance of this orchestration already started running." 
+ ) def _get_method_name_for_action(action: pb.OrchestratorAction) -> str: - action_type = action.WhichOneof('orchestratorActionType') + action_type = action.WhichOneof("orchestratorActionType") if action_type == "scheduleTask": return task.get_name(task.OrchestrationContext.call_activity) elif action_type == "createTimer": @@ -851,7 +1319,7 @@ def _get_new_event_summary(new_events: Sequence[pb.HistoryEvent]) -> str: else: counts: dict[str, int] = {} for event in new_events: - event_type = event.WhichOneof('eventType') + event_type = event.WhichOneof("eventType") counts[event_type] = counts.get(event_type, 0) + 1 return f"[{', '.join(f'{name}={count}' for name, count in counts.items())}]" @@ -865,11 +1333,210 @@ def _get_action_summary(new_actions: Sequence[pb.OrchestratorAction]) -> str: else: counts: dict[str, int] = {} for action in new_actions: - action_type = action.WhichOneof('orchestratorActionType') + action_type = action.WhichOneof("orchestratorActionType") counts[action_type] = counts.get(action_type, 0) + 1 return f"[{', '.join(f'{name}={count}' for name, count in counts.items())}]" def _is_suspendable(event: pb.HistoryEvent) -> bool: """Returns true if the event is one that can be suspended and resumed.""" - return event.WhichOneof("eventType") not in ["executionResumed", "executionTerminated"] + return event.WhichOneof("eventType") not in [ + "executionResumed", + "executionTerminated", + ] + + +class _AsyncWorkerManager: + def __init__(self, concurrency_options: ConcurrencyOptions): + self.concurrency_options = concurrency_options + self.activity_semaphore = None + self.orchestration_semaphore = None + # Don't create queues here - defer until we have an event loop + self.activity_queue: Optional[asyncio.Queue] = None + self.orchestration_queue: Optional[asyncio.Queue] = None + self._queue_event_loop: Optional[asyncio.AbstractEventLoop] = None + # Store work items when no event loop is available + self._pending_activity_work: list = [] + 
self._pending_orchestration_work: list = [] + self.thread_pool = ThreadPoolExecutor( + max_workers=concurrency_options.maximum_thread_pool_workers, + thread_name_prefix="DurableTask", + ) + self._shutdown = False + + def _ensure_queues_for_current_loop(self): + """Ensure queues are bound to the current event loop.""" + try: + current_loop = asyncio.get_running_loop() + except RuntimeError: + # No event loop running, can't create queues + return + + # Check if queues are already properly set up for current loop + if self._queue_event_loop is current_loop: + if self.activity_queue is not None and self.orchestration_queue is not None: + # Queues are already bound to the current loop and exist + return + + # Need to recreate queues for the current event loop + # First, preserve any existing work items + existing_activity_items = [] + existing_orchestration_items = [] + + if self.activity_queue is not None: + try: + while not self.activity_queue.empty(): + existing_activity_items.append(self.activity_queue.get_nowait()) + except Exception: + pass + + if self.orchestration_queue is not None: + try: + while not self.orchestration_queue.empty(): + existing_orchestration_items.append( + self.orchestration_queue.get_nowait() + ) + except Exception: + pass + + # Create fresh queues for the current event loop + self.activity_queue = asyncio.Queue() + self.orchestration_queue = asyncio.Queue() + self._queue_event_loop = current_loop + + # Restore the work items to the new queues + for item in existing_activity_items: + self.activity_queue.put_nowait(item) + for item in existing_orchestration_items: + self.orchestration_queue.put_nowait(item) + + # Move pending work items to the queues + for item in self._pending_activity_work: + self.activity_queue.put_nowait(item) + for item in self._pending_orchestration_work: + self.orchestration_queue.put_nowait(item) + + # Clear the pending work lists + self._pending_activity_work.clear() + self._pending_orchestration_work.clear() + + 
async def run(self): + # Reset shutdown flag in case this manager is being reused + self._shutdown = False + + # Ensure queues are properly bound to the current event loop + self._ensure_queues_for_current_loop() + + # Create semaphores in the current event loop + self.activity_semaphore = asyncio.Semaphore( + self.concurrency_options.maximum_concurrent_activity_work_items + ) + self.orchestration_semaphore = asyncio.Semaphore( + self.concurrency_options.maximum_concurrent_orchestration_work_items + ) + + # Start background consumers for each work type + if self.activity_queue is not None and self.orchestration_queue is not None: + await asyncio.gather( + self._consume_queue(self.activity_queue, self.activity_semaphore), + self._consume_queue( + self.orchestration_queue, self.orchestration_semaphore + ), + ) + + async def _consume_queue(self, queue: asyncio.Queue, semaphore: asyncio.Semaphore): + # List to track running tasks + running_tasks: set[asyncio.Task] = set() + + while True: + # Clean up completed tasks + done_tasks = {task for task in running_tasks if task.done()} + running_tasks -= done_tasks + + # Exit if shutdown is set and the queue is empty and no tasks are running + if self._shutdown and queue.empty() and not running_tasks: + break + + try: + work = await asyncio.wait_for(queue.get(), timeout=1.0) + except asyncio.TimeoutError: + continue + + func, args, kwargs = work + # Create a concurrent task for processing + task = asyncio.create_task( + self._process_work_item(semaphore, queue, func, args, kwargs) + ) + running_tasks.add(task) + + async def _process_work_item( + self, semaphore: asyncio.Semaphore, queue: asyncio.Queue, func, args, kwargs + ): + async with semaphore: + try: + await self._run_func(func, *args, **kwargs) + finally: + queue.task_done() + + async def _run_func(self, func, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return await func(*args, **kwargs) + else: + loop = asyncio.get_running_loop() + # Avoid submitting to 
executor after shutdown + if ( + getattr(self, "_shutdown", False) and getattr(self, "thread_pool", None) and getattr( + self.thread_pool, "_shutdown", False) + ): + return None + return await loop.run_in_executor( + self.thread_pool, lambda: func(*args, **kwargs) + ) + + def submit_activity(self, func, *args, **kwargs): + work_item = (func, args, kwargs) + self._ensure_queues_for_current_loop() + if self.activity_queue is not None: + self.activity_queue.put_nowait(work_item) + else: + # No event loop running, store in pending list + self._pending_activity_work.append(work_item) + + def submit_orchestration(self, func, *args, **kwargs): + work_item = (func, args, kwargs) + self._ensure_queues_for_current_loop() + if self.orchestration_queue is not None: + self.orchestration_queue.put_nowait(work_item) + else: + # No event loop running, store in pending list + self._pending_orchestration_work.append(work_item) + + def shutdown(self): + self._shutdown = True + self.thread_pool.shutdown(wait=True) + + def reset_for_new_run(self): + """Reset the manager state for a new run.""" + self._shutdown = False + # Clear any existing queues - they'll be recreated when needed + if self.activity_queue is not None: + # Clear existing queue by creating a new one + # This ensures no items from previous runs remain + try: + while not self.activity_queue.empty(): + self.activity_queue.get_nowait() + except Exception: + pass + if self.orchestration_queue is not None: + try: + while not self.orchestration_queue.empty(): + self.orchestration_queue.get_nowait() + except Exception: + pass + # Clear pending work lists + self._pending_activity_work.clear() + self._pending_orchestration_work.clear() + + +# Export public API +__all__ = ["ConcurrencyOptions", "TaskHubGrpcWorker"] diff --git a/examples/README.md b/examples/README.md index 7cfbc7a..404b127 100644 --- a/examples/README.md +++ b/examples/README.md @@ -24,4 +24,4 @@ In some cases, the sample may require command-line parameters or 
user inputs. In - [Activity sequence](./activity_sequence.py): Orchestration that schedules three activity calls in a sequence. - [Fan-out/fan-in](./fanout_fanin.py): Orchestration that schedules a dynamic number of activity calls in parallel, waits for all of them to complete, and then performs an aggregation on the results. -- [Human interaction](./human_interaction.py): Orchestration that waits for a human to approve an order before continuing. \ No newline at end of file +- [Human interaction](./human_interaction.py): Orchestration that waits for a human to approve an order before continuing. diff --git a/pyproject.toml b/pyproject.toml index 60a9d37..1491988 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "durabletask" -version = "0.2.1" +version = "0.3.0" description = "A Durable Task Client SDK for Python" keywords = [ "durable", diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py index 64bbec8..e750134 100644 --- a/tests/durabletask/test_client.py +++ b/tests/durabletask/test_client.py @@ -1,13 +1,14 @@ -from unittest.mock import patch, ANY +from unittest.mock import ANY, patch +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl from durabletask.internal.shared import (get_default_host_address, get_grpc_channel) -from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl HOST_ADDRESS = 'localhost:50051' METADATA = [('key1', 'value1'), ('key2', 'value2')] INTERCEPTORS = [DefaultClientInterceptorImpl(METADATA)] + def test_get_grpc_channel_insecure(): with patch('grpc.insecure_channel') as mock_channel: get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) @@ -85,4 +86,4 @@ def test_grpc_channel_with_host_name_protocol_stripping(): prefix = "" get_grpc_channel(prefix + host_name, True, interceptors=INTERCEPTORS) - mock_secure_channel.assert_called_with(host_name, ANY) \ No newline at end of file + 
mock_secure_channel.assert_called_with(host_name, ANY) diff --git a/tests/durabletask/test_concurrency_options.py b/tests/durabletask/test_concurrency_options.py new file mode 100644 index 0000000..b49b7ec --- /dev/null +++ b/tests/durabletask/test_concurrency_options.py @@ -0,0 +1,96 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import os + +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker + + +def test_default_concurrency_options(): + """Test that default concurrency options work correctly.""" + options = ConcurrencyOptions() + processor_count = os.cpu_count() or 1 + expected_default = 100 * processor_count + expected_workers = processor_count + 4 + + assert options.maximum_concurrent_activity_work_items == expected_default + assert options.maximum_concurrent_orchestration_work_items == expected_default + assert options.maximum_thread_pool_workers == expected_workers + + +def test_custom_concurrency_options(): + """Test that custom concurrency options work correctly.""" + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=50, + maximum_concurrent_orchestration_work_items=25, + maximum_thread_pool_workers=30, + ) + + assert options.maximum_concurrent_activity_work_items == 50 + assert options.maximum_concurrent_orchestration_work_items == 25 + assert options.maximum_thread_pool_workers == 30 + + +def test_partial_custom_options(): + """Test that partially specified options use defaults for unspecified values.""" + processor_count = os.cpu_count() or 1 + expected_default = 100 * processor_count + expected_workers = processor_count + 4 + + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=30 + ) + + assert options.maximum_concurrent_activity_work_items == 30 + assert options.maximum_concurrent_orchestration_work_items == expected_default + assert options.maximum_thread_pool_workers == expected_workers + + +def test_worker_with_concurrency_options(): + """Test that 
TaskHubGrpcWorker accepts concurrency options.""" + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=10, + maximum_concurrent_orchestration_work_items=20, + maximum_thread_pool_workers=15, + ) + + worker = TaskHubGrpcWorker(concurrency_options=options) + + assert worker.concurrency_options == options + + +def test_worker_default_options(): + """Test that TaskHubGrpcWorker uses default options when no parameters are provided.""" + worker = TaskHubGrpcWorker() + + processor_count = os.cpu_count() or 1 + expected_default = 100 * processor_count + expected_workers = processor_count + 4 + + assert ( + worker.concurrency_options.maximum_concurrent_activity_work_items == expected_default + ) + assert ( + worker.concurrency_options.maximum_concurrent_orchestration_work_items == expected_default + ) + assert worker.concurrency_options.maximum_thread_pool_workers == expected_workers + + +def test_concurrency_options_property_access(): + """Test that the concurrency_options property works correctly.""" + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=15, + maximum_concurrent_orchestration_work_items=25, + maximum_thread_pool_workers=30, + ) + + worker = TaskHubGrpcWorker(concurrency_options=options) + retrieved_options = worker.concurrency_options + + # Should be the same object + assert retrieved_options is options + + # Should have correct values + assert retrieved_options.maximum_concurrent_activity_work_items == 15 + assert retrieved_options.maximum_concurrent_orchestration_work_items == 25 + assert retrieved_options.maximum_thread_pool_workers == 30 diff --git a/tests/durabletask/test_worker_concurrency_loop.py b/tests/durabletask/test_worker_concurrency_loop.py new file mode 100644 index 0000000..de6753b --- /dev/null +++ b/tests/durabletask/test_worker_concurrency_loop.py @@ -0,0 +1,140 @@ +import asyncio +import threading +import time + +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker + + +class 
DummyStub: + def __init__(self): + self.completed = [] + + def CompleteOrchestratorTask(self, res): + self.completed.append(('orchestrator', res)) + + def CompleteActivityTask(self, res): + self.completed.append(('activity', res)) + + +class DummyRequest: + def __init__(self, kind, instance_id): + self.kind = kind + self.instanceId = instance_id + self.orchestrationInstance = type('O', (), {'instanceId': instance_id}) + self.name = 'dummy' + self.taskId = 1 + self.input = type('I', (), {'value': ''}) + self.pastEvents = [] + self.newEvents = [] + + def HasField(self, field): + return (field == 'orchestratorRequest' and self.kind == 'orchestrator') or \ + (field == 'activityRequest' and self.kind == 'activity') + + def WhichOneof(self, _): + return f'{self.kind}Request' + + +class DummyCompletionToken: + pass + + +def test_worker_concurrency_loop_sync(): + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=2, + maximum_concurrent_orchestration_work_items=1, + maximum_thread_pool_workers=2, + ) + worker = TaskHubGrpcWorker(concurrency_options=options) + stub = DummyStub() + + def dummy_orchestrator(req, stub, completionToken): + time.sleep(0.1) + stub.CompleteOrchestratorTask('ok') + + def dummy_activity(req, stub, completionToken): + time.sleep(0.1) + stub.CompleteActivityTask('ok') + + # Patch the worker's _execute_orchestrator and _execute_activity + worker._execute_orchestrator = dummy_orchestrator + worker._execute_activity = dummy_activity + + orchestrator_requests = [DummyRequest('orchestrator', f'orch{i}') for i in range(3)] + activity_requests = [DummyRequest('activity', f'act{i}') for i in range(4)] + + async def run_test(): + # Start the worker manager's run loop in the background + worker_task = asyncio.create_task(worker._async_worker_manager.run()) + for req in orchestrator_requests: + worker._async_worker_manager.submit_orchestration(dummy_orchestrator, req, stub, DummyCompletionToken()) + for req in activity_requests: + 
worker._async_worker_manager.submit_activity(dummy_activity, req, stub, DummyCompletionToken()) + await asyncio.sleep(1.0) + orchestrator_count = sum(1 for t, _ in stub.completed if t == 'orchestrator') + activity_count = sum(1 for t, _ in stub.completed if t == 'activity') + assert orchestrator_count == 3, f"Expected 3 orchestrator completions, got {orchestrator_count}" + assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" + worker._async_worker_manager._shutdown = True + await worker_task + asyncio.run(run_test()) + + +# Dummy orchestrator and activity for sync context +def dummy_orchestrator(ctx, input): + # Simulate some work + time.sleep(0.1) + return "orchestrator-done" + + +def dummy_activity(ctx, input): + # Simulate some work + time.sleep(0.1) + return "activity-done" + + +def test_worker_concurrency_sync(): + # Use small concurrency to make test observable + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=2, + maximum_concurrent_orchestration_work_items=2, + maximum_thread_pool_workers=2, + ) + worker = TaskHubGrpcWorker(concurrency_options=options) + worker.add_orchestrator(dummy_orchestrator) + worker.add_activity(dummy_activity) + + # Simulate submitting work items to the queues directly (bypassing gRPC) + # We'll use the internal _async_worker_manager for this test + manager = worker._async_worker_manager + results = [] + lock = threading.Lock() + + def make_work(kind, idx): + def fn(*args, **kwargs): + time.sleep(0.1) + with lock: + results.append((kind, idx)) + return f"{kind}-{idx}-done" + return fn + + # Submit more work than concurrency allows + for i in range(5): + manager.submit_orchestration(make_work("orch", i)) + manager.submit_activity(make_work("act", i)) + + # Run the manager loop in a thread (sync context) + def run_manager(): + asyncio.run(manager.run()) + + t = threading.Thread(target=run_manager) + t.start() + time.sleep(1.5) # Let work process + manager.shutdown() + # Unblock 
the consumers by putting dummy items in the queues + manager.activity_queue.put_nowait((lambda: None, (), {})) + manager.orchestration_queue.put_nowait((lambda: None, (), {})) + t.join(timeout=2) + + # Check that all work items completed + assert len(results) == 10 diff --git a/tests/durabletask/test_worker_concurrency_loop_async.py b/tests/durabletask/test_worker_concurrency_loop_async.py new file mode 100644 index 0000000..c7ba238 --- /dev/null +++ b/tests/durabletask/test_worker_concurrency_loop_async.py @@ -0,0 +1,80 @@ +import asyncio + +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker + + +class DummyStub: + def __init__(self): + self.completed = [] + + def CompleteOrchestratorTask(self, res): + self.completed.append(('orchestrator', res)) + + def CompleteActivityTask(self, res): + self.completed.append(('activity', res)) + + +class DummyRequest: + def __init__(self, kind, instance_id): + self.kind = kind + self.instanceId = instance_id + self.orchestrationInstance = type('O', (), {'instanceId': instance_id}) + self.name = 'dummy' + self.taskId = 1 + self.input = type('I', (), {'value': ''}) + self.pastEvents = [] + self.newEvents = [] + + def HasField(self, field): + return (field == 'orchestratorRequest' and self.kind == 'orchestrator') or \ + (field == 'activityRequest' and self.kind == 'activity') + + def WhichOneof(self, _): + return f'{self.kind}Request' + + +class DummyCompletionToken: + pass + + +def test_worker_concurrency_loop_async(): + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=2, + maximum_concurrent_orchestration_work_items=1, + maximum_thread_pool_workers=2, + ) + grpc_worker = TaskHubGrpcWorker(concurrency_options=options) + stub = DummyStub() + + async def dummy_orchestrator(req, stub, completionToken): + await asyncio.sleep(0.1) + stub.CompleteOrchestratorTask('ok') + + async def dummy_activity(req, stub, completionToken): + await asyncio.sleep(0.1) + stub.CompleteActivityTask('ok') + + # Patch 
the worker's _execute_orchestrator and _execute_activity + grpc_worker._execute_orchestrator = dummy_orchestrator + grpc_worker._execute_activity = dummy_activity + + orchestrator_requests = [DummyRequest('orchestrator', f'orch{i}') for i in range(3)] + activity_requests = [DummyRequest('activity', f'act{i}') for i in range(4)] + + async def run_test(): + # Clear stub state before each run + stub.completed.clear() + worker_task = asyncio.create_task(grpc_worker._async_worker_manager.run()) + for req in orchestrator_requests: + grpc_worker._async_worker_manager.submit_orchestration(dummy_orchestrator, req, stub, DummyCompletionToken()) + for req in activity_requests: + grpc_worker._async_worker_manager.submit_activity(dummy_activity, req, stub, DummyCompletionToken()) + await asyncio.sleep(1.0) + orchestrator_count = sum(1 for t, _ in stub.completed if t == 'orchestrator') + activity_count = sum(1 for t, _ in stub.completed if t == 'activity') + assert orchestrator_count == 3, f"Expected 3 orchestrator completions, got {orchestrator_count}" + assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" + grpc_worker._async_worker_manager._shutdown = True + await worker_task + asyncio.run(run_test()) + asyncio.run(run_test()) From 43a4453c51ba0745301253f53daff788896441c6 Mon Sep 17 00:00:00 2001 From: Bernd Verst Date: Tue, 3 Jun 2025 11:46:10 -0700 Subject: [PATCH 26/81] Update GitHub workflows and automate release (#51) * Update GitHub workflows and automate release * Update linter config * more workflow changes * linter fixes * ignore new linter warning * even more workflow cleanup and improvement * declare asyncio as package dependency * Update requirements.txt Signed-off-by: Albert Callarisa --- ...s-sdk.yml => durabletask-azuremanaged.yml} | 17 ++- .github/workflows/durabletask.yml | 108 ++++++++++++++++++ .github/workflows/pr-validation.yml | 59 ---------- pyproject.toml | 3 +- requirements.txt | 4 +- .../test_dts_activity_sequence.py | 
6 +- .../test_dts_orchestration_e2e.py | 49 ++++---- .../test_durabletask_grpc_interceptor.py | 14 +-- tests/durabletask/test_orchestration_e2e.py | 7 +- 9 files changed, 163 insertions(+), 104 deletions(-) rename .github/workflows/{publish-dts-sdk.yml => durabletask-azuremanaged.yml} (87%) create mode 100644 .github/workflows/durabletask.yml delete mode 100644 .github/workflows/pr-validation.yml diff --git a/.github/workflows/publish-dts-sdk.yml b/.github/workflows/durabletask-azuremanaged.yml similarity index 87% rename from .github/workflows/publish-dts-sdk.yml rename to .github/workflows/durabletask-azuremanaged.yml index de773f2..73017e4 100644 --- a/.github/workflows/publish-dts-sdk.yml +++ b/.github/workflows/durabletask-azuremanaged.yml @@ -1,4 +1,4 @@ -name: Publish Durable Task Scheduler to PyPI +name: Durable Task Scheduler SDK (durabletask-azuremanaged) on: push: @@ -15,10 +15,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Set up Python 3.12 + - name: Set up Python 3.13 uses: actions/setup-python@v5 with: - python-version: 3.12 + python-version: 3.13 - name: Install dependencies working-directory: durabletask-azuremanaged run: | @@ -28,10 +28,17 @@ jobs: - name: Run flake8 Linter working-directory: durabletask-azuremanaged run: flake8 . + - name: Run flake8 Linter + working-directory: tests/durabletask-azuremanaged + run: flake8 . 
run-docker-tests: + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] env: - EMULATOR_VERSION: "v0.0.5" # Define the variable + EMULATOR_VERSION: "latest" needs: lint runs-on: ubuntu-latest steps: @@ -84,7 +91,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.12" # Adjust Python version as needed + python-version: "3.13" # Adjust Python version as needed - name: Install dependencies run: | diff --git a/.github/workflows/durabletask.yml b/.github/workflows/durabletask.yml new file mode 100644 index 0000000..4fb3fb0 --- /dev/null +++ b/.github/workflows/durabletask.yml @@ -0,0 +1,108 @@ +name: Durable Task SDK (durabletask) + +on: + push: + branches: + - "main" + tags: + - "v*" # Only run for tags starting with "v" + pull_request: + branches: + - "main" + +jobs: + lint-and-unit-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: 3.13 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel tox + pip install flake8 + - name: Run flake8 Linter + working-directory: durabletask + run: flake8 . + - name: "Run flake8 linter: tests" + working-directory: tests/durabletask + run: flake8 . + - name: "Run flake8 linter: examples" + working-directory: examples + run: flake8 . + + run-tests: + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + needs: lint-and-unit-tests + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install durabletask dependencies and the library itself + run: | + python -m pip install --upgrade pip + pip install flake8 pytest + pip install -r requirements.txt + pip install . 
+ - name: Pytest unit tests + working-directory: tests/durabletask + run: | + pytest -m "not e2e and not dts" --verbose + # Sidecar for running e2e tests requires Go SDK + - name: Install Go SDK + uses: actions/setup-go@v5 + with: + go-version: 'stable' + # Install and run the durabletask-go sidecar for running e2e tests + - name: Pytest e2e tests + working-directory: tests/durabletask + run: | + go install github.com/microsoft/durabletask-go@main + durabletask-go --port 4001 & + pytest -m "e2e and not dts" --verbose + + publish: + if: startsWith(github.ref, 'refs/tags/v') # Only run if a matching tag is pushed + needs: run-tests + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Extract version from tag + run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV # Extract version from the tag + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.13" # Adjust Python version as needed + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build twine + + - name: Build package from root directory + run: | + python -m build + + - name: Check package + run: | + twine check dist/* + + - name: Publish package to PyPI + env: + TWINE_USERNAME: __token__ + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} # Store your PyPI API token in GitHub Secrets + run: | + twine upload dist/* \ No newline at end of file diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml deleted file mode 100644 index 1d14d83..0000000 --- a/.github/workflows/pr-validation.yml +++ /dev/null @@ -1,59 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a variety of Python versions -# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python - -name: Build Validation - -on: - push: - branches: [ "main" ] - pull_request: - branches: [ "main" ] - merge_group: - -jobs: - 
build: - - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] - - steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Install durabletask dependencies and the library itself in editable mode - run: | - python -m pip install --upgrade pip - pip install flake8 pytest - pip install -r requirements.txt - pip install -e . - - name: Install durabletask-azuremanaged dependencies - working-directory: examples/dts - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt - - name: Lint with flake8 - run: | - flake8 . --count --show-source --statistics --exit-zero - - name: Pytest unit tests - working-directory: tests/durabletask - run: | - pytest -m "not e2e and not dts" --verbose - - # Sidecar for running e2e tests requires Go SDK - - name: Install Go SDK - uses: actions/setup-go@v5 - with: - go-version: 'stable' - - # Install and run the durabletask-go sidecar for running e2e tests - - name: Pytest e2e tests - working-directory: tests/durabletask - run: | - go install github.com/microsoft/durabletask-go@main - durabletask-go --port 4001 & - pytest -m "e2e and not dts" --verbose diff --git a/pyproject.toml b/pyproject.toml index 1491988..5438ca4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,7 +26,8 @@ license = {file = "LICENSE"} readme = "README.md" dependencies = [ "grpcio", - "protobuf" + "protobuf", + "asyncio" ] [project.urls] diff --git a/requirements.txt b/requirements.txt index 0da7d46..721453b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,5 +3,5 @@ grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newe protobuf pytest pytest-cov -azure-core -azure-identity \ No newline at end of file +azure-identity +asyncio \ No newline at end of file diff --git 
a/tests/durabletask-azuremanaged/test_dts_activity_sequence.py b/tests/durabletask-azuremanaged/test_dts_activity_sequence.py index c875e49..1a685d0 100644 --- a/tests/durabletask-azuremanaged/test_dts_activity_sequence.py +++ b/tests/durabletask-azuremanaged/test_dts_activity_sequence.py @@ -2,15 +2,15 @@ that calls an activity function in a sequence and prints the outputs.""" import os +import pytest + from durabletask import client, task from durabletask.azuremanaged.client import DurableTaskSchedulerClient from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker -import pytest - - pytestmark = pytest.mark.dts + def hello(ctx: task.ActivityContext, name: str) -> str: """Activity function that returns a greeting""" return f'Hello {name}!' diff --git a/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py b/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py index f10e605..9b7603f 100644 --- a/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py +++ b/tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py @@ -2,9 +2,8 @@ # Licensed under the MIT License. 
import json -import threading -import time import os +import threading from datetime import timedelta import pytest @@ -21,6 +20,7 @@ taskhub_name = os.getenv("TASKHUB", "default") endpoint = os.getenv("ENDPOINT", "http://localhost:8080") + def test_empty_orchestration(): invoked = False @@ -31,12 +31,12 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(empty_orchestrator) w.start() c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = c.schedule_new_orchestration(empty_orchestrator) state = c.wait_for_orchestration_completion(id, timeout=30) @@ -66,13 +66,13 @@ def sequence(ctx: task.OrchestrationContext, start_val: int): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(sequence) w.add_activity(plus_one) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(sequence, input=1) state = task_hub_client.wait_for_orchestration_completion( id, timeout=30) @@ -113,14 +113,14 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, 
token_credential=None) as w: w.add_orchestrator(orchestrator) w.add_activity(throw) w.add_activity(increment_counter) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) @@ -158,14 +158,14 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_activity(increment) w.add_orchestrator(orchestrator_child) w.add_orchestrator(parent_orchestrator) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) @@ -184,13 +184,13 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(orchestrator) w.start() # Start the orchestration and immediately raise events to it. 
task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(orchestrator) task_hub_client.raise_orchestration_event(id, 'A', data='a') task_hub_client.raise_orchestration_event(id, 'B', data='b') @@ -285,12 +285,12 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(orchestrator) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(orchestrator) state = task_hub_client.wait_for_orchestration_start(id, timeout=30) assert state is not None @@ -302,23 +302,25 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.TERMINATED assert state.serialized_output == json.dumps("some reason for termination") + def test_terminate_recursive(): def root(ctx: task.OrchestrationContext, _): result = yield ctx.call_sub_orchestrator(child) return result + def child(ctx: task.OrchestrationContext, _): result = yield ctx.wait_for_external_event("my_event") return result # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(root) w.add_orchestrator(child) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, 
token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(root) state = task_hub_client.wait_for_orchestration_start(id, timeout=30) assert state is not None @@ -331,7 +333,7 @@ def child(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.TERMINATED # Verify that child orchestration is also terminated - c = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.runtime_status == client.OrchestrationStatus.TERMINATED @@ -417,14 +419,14 @@ def throw_activity_with_retry(ctx: task.ActivityContext, _): raise RuntimeError("Kah-BOOOOM!!!") with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(parent_orchestrator_with_retry) w.add_orchestrator(child_orchestrator_with_retry) w.add_activity(throw_activity_with_retry) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry) state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None @@ -460,13 +462,13 @@ def throw_activity(ctx: task.ActivityContext, _): raise RuntimeError("Kah-BOOOOM!!!") with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(mock_orchestrator) w.add_activity(throw_activity) w.start() task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, 
token_credential=None) id = task_hub_client.schedule_new_orchestration(mock_orchestrator) state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None @@ -477,6 +479,7 @@ def throw_activity(ctx: task.ActivityContext, _): assert state.failure_details.stack_trace is not None assert throw_activity_counter == 4 + def test_custom_status(): def empty_orchestrator(ctx: task.OrchestrationContext, _): @@ -484,12 +487,12 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): # Start a worker, which will connect to the sidecar in a background thread with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) as w: + taskhub=taskhub_name, token_credential=None) as w: w.add_orchestrator(empty_orchestrator) w.start() c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, - taskhub=taskhub_name, token_credential=None) + taskhub=taskhub_name, token_credential=None) id = c.schedule_new_orchestration(empty_orchestrator) state = c.wait_for_orchestration_completion(id, timeout=30) diff --git a/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py b/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py index 62978f9..0480d3d 100644 --- a/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py +++ b/tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-import threading import unittest from concurrent import futures from importlib.metadata import version @@ -9,20 +8,17 @@ import grpc from durabletask.azuremanaged.client import DurableTaskSchedulerClient -from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( - DTSDefaultClientInterceptorImpl, -) from durabletask.internal import orchestrator_service_pb2 as pb from durabletask.internal import orchestrator_service_pb2_grpc as stubs class MockTaskHubSidecarServiceServicer(stubs.TaskHubSidecarServiceServicer): """Mock implementation of the TaskHubSidecarService for testing.""" - + def __init__(self): self.captured_metadata = {} self.requests_received = 0 - + def GetInstance(self, request, context): """Implementation of GetInstance that captures the metadata.""" # Store all metadata key-value pairs from the context @@ -38,7 +34,7 @@ def GetInstance(self, request, context): class TestDurableTaskGrpcInterceptor(unittest.TestCase): """Tests for the DTSDefaultClientInterceptorImpl class.""" - + @classmethod def setUpClass(cls): # Start a real gRPC server on a free port @@ -52,11 +48,11 @@ def setUpClass(cls): # Start the server in a background thread cls.server.start() - + @classmethod def tearDownClass(cls): cls.server.stop(grace=None) - + def test_user_agent_metadata_passed_in_request(self): """Test that the user agent metadata is correctly passed in gRPC requests.""" # Create a client that connects to our mock server diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index d3d7f0b..3ccf782 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -278,10 +278,12 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.TERMINATED assert state.serialized_output == json.dumps("some reason for termination") + def test_terminate_recursive(): def root(ctx: task.OrchestrationContext, _): result = 
yield ctx.call_sub_orchestrator(child) return result + def child(ctx: task.OrchestrationContext, _): result = yield ctx.wait_for_external_event("my_event") return result @@ -305,7 +307,7 @@ def child(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.TERMINATED # Verify that child orchestration is also terminated - c = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.runtime_status == client.OrchestrationStatus.TERMINATED @@ -321,7 +323,7 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): result = yield ctx.wait_for_external_event("my_event") if not ctx.is_replaying: # NOTE: Real orchestrations should never interact with nonlocal variables like this. - nonlocal all_results + nonlocal all_results # noqa: F824 all_results.append(result) if len(all_results) <= 4: @@ -445,6 +447,7 @@ def throw_activity(ctx: task.ActivityContext, _): assert state.failure_details.stack_trace is not None assert throw_activity_counter == 4 + def test_custom_status(): def empty_orchestrator(ctx: task.OrchestrationContext, _): From dfec5dac13d32282a921e9ca0c2b5caf3e782f6c Mon Sep 17 00:00:00 2001 From: Elena Kolevska Date: Tue, 3 Jun 2025 21:29:05 +0100 Subject: [PATCH 27/81] Updates instructions for running e2e tests to match CI (#37) Signed-off-by: Elena Kolevska Co-authored-by: Bernd Verst Signed-off-by: Albert Callarisa --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 87af41d..b9d829c 100644 --- a/README.md +++ b/README.md @@ -177,10 +177,11 @@ make test-unit ### Running E2E tests -The E2E (end-to-end) tests require a sidecar process to be running. You can use the Dapr sidecar for this or run a Durable Task test sidecar using the following `docker` command: +The E2E (end-to-end) tests require a sidecar process to be running. 
You can use the Dapr sidecar for this or run a Durable Task test sidecar using the following command: ```sh -docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator +go install github.com/microsoft/durabletask-go@main +durabletask-go --port 4001 ``` To run the E2E tests, run the following command from the project root: From 89437eb2677227fba400ae39150f0f402e62e9e8 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Wed, 18 Jun 2025 12:13:07 +0200 Subject: [PATCH 28/81] Bring examples back Signed-off-by: Albert Callarisa --- examples/README.md | 27 ++++++++++ examples/activity_sequence.py | 35 +++++++++++++ examples/fanout_fanin.py | 62 ++++++++++++++++++++++ examples/human_interaction.py | 99 +++++++++++++++++++++++++++++++++++ 4 files changed, 223 insertions(+) create mode 100644 examples/README.md create mode 100644 examples/activity_sequence.py create mode 100644 examples/fanout_fanin.py create mode 100644 examples/human_interaction.py diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000..404b127 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,27 @@ +# Examples + +This directory contains examples of how to author durable orchestrations using the Durable Task Python SDK. + +## Prerequisites + +All the examples assume that you have a Durable Task-compatible sidecar running locally. There are two options for this: + +1. Install the latest version of the [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/), which contains and exposes an embedded version of the Durable Task engine. The setup process (which requires Docker) will configure the workflow engine to store state in a local Redis container. + +2. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. 
+ +## Running the examples + +With one of the sidecars running, you can simply execute any of the examples in this directory using `python3`: + +```sh +python3 ./activity_sequence.py +``` + +In some cases, the sample may require command-line parameters or user inputs. In these cases, the sample will print out instructions on how to proceed. + +## List of examples + +- [Activity sequence](./activity_sequence.py): Orchestration that schedules three activity calls in a sequence. +- [Fan-out/fan-in](./fanout_fanin.py): Orchestration that schedules a dynamic number of activity calls in parallel, waits for all of them to complete, and then performs an aggregation on the results. +- [Human interaction](./human_interaction.py): Orchestration that waits for a human to approve an order before continuing. diff --git a/examples/activity_sequence.py b/examples/activity_sequence.py new file mode 100644 index 0000000..066a733 --- /dev/null +++ b/examples/activity_sequence.py @@ -0,0 +1,35 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that calls an activity function in a sequence and prints the outputs.""" +from durabletask import client, task, worker + + +def hello(ctx: task.ActivityContext, name: str) -> str: + """Activity function that returns a greeting""" + return f'Hello {name}!' 
+ + +def sequence(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'hello' activity function in a sequence""" + # call "hello" activity function in a sequence + result1 = yield ctx.call_activity(hello, input='Tokyo') + result2 = yield ctx.call_activity(hello, input='Seattle') + result3 = yield ctx.call_activity(hello, input='London') + + # return an array of results + return [result1, result2, result3] + + +# configure and start the worker +with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(sequence) + w.add_activity(hello) + w.start() + + # create a client, start an orchestration, and wait for it to finish + c = client.TaskHubGrpcClient() + instance_id = c.schedule_new_orchestration(sequence) + state = c.wait_for_orchestration_completion(instance_id, timeout=10) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') diff --git a/examples/fanout_fanin.py b/examples/fanout_fanin.py new file mode 100644 index 0000000..c53744f --- /dev/null +++ b/examples/fanout_fanin.py @@ -0,0 +1,62 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that a dynamic number activity functions in parallel, waits for them all +to complete, and prints an aggregate summary of the outputs.""" +import random +import time + +from durabletask import client, task, worker + + +def get_work_items(ctx: task.ActivityContext, _) -> list[str]: + """Activity function that returns a list of work items""" + # return a random number of work items + count = random.randint(2, 10) + print(f'generating {count} work items...') + return [f'work item {i}' for i in range(count)] + + +def process_work_item(ctx: task.ActivityContext, item: str) -> int: + """Activity function that returns a result for a given work item""" + print(f'processing work item: {item}') + + # simulate some work that 
takes a variable amount of time + time.sleep(random.random() * 5) + + # return a result for the given work item, which is also a random number in this case + return random.randint(0, 10) + + +def orchestrator(ctx: task.OrchestrationContext, _): + """Orchestrator function that calls the 'get_work_items' and 'process_work_item' + activity functions in parallel, waits for them all to complete, and prints + an aggregate summary of the outputs""" + + work_items: list[str] = yield ctx.call_activity(get_work_items) + + # execute the work-items in parallel and wait for them all to return + tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items] + results: list[int] = yield task.when_all(tasks) + + # return an aggregate summary of the results + return { + 'work_items': work_items, + 'results': results, + 'total': sum(results), + } + + +# configure and start the worker +with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(orchestrator) + w.add_activity(process_work_item) + w.add_activity(get_work_items) + w.start() + + # create a client, start an orchestration, and wait for it to finish + c = client.TaskHubGrpcClient() + instance_id = c.schedule_new_orchestration(orchestrator) + state = c.wait_for_orchestration_completion(instance_id, timeout=30) + if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! Result: {state.serialized_output}') + elif state: + print(f'Orchestration failed: {state.failure_details}') diff --git a/examples/human_interaction.py b/examples/human_interaction.py new file mode 100644 index 0000000..2a01897 --- /dev/null +++ b/examples/human_interaction.py @@ -0,0 +1,99 @@ +"""End-to-end sample that demonstrates how to configure an orchestrator +that waits for an "approval" event before proceding to the next step. 
If +the approval isn't received within a specified timeout, the order that is +represented by the orchestration is automatically cancelled.""" + +import threading +import time +from collections import namedtuple +from dataclasses import dataclass +from datetime import timedelta + +from durabletask import client, task, worker + + +@dataclass +class Order: + """Represents a purchase order""" + Cost: float + Product: str + Quantity: int + + def __str__(self): + return f'{self.Product} ({self.Quantity})' + + +def send_approval_request(_: task.ActivityContext, order: Order) -> None: + """Activity function that sends an approval request to the manager""" + time.sleep(5) + print(f'*** Sending approval request for order: {order}') + + +def place_order(_: task.ActivityContext, order: Order) -> None: + """Activity function that places an order""" + print(f'*** Placing order: {order}') + + +def purchase_order_workflow(ctx: task.OrchestrationContext, order: Order): + """Orchestrator function that represents a purchase order workflow""" + # Orders under $1000 are auto-approved + if order.Cost < 1000: + return "Auto-approved" + + # Orders of $1000 or more require manager approval + yield ctx.call_activity(send_approval_request, input=order) + + # Approvals must be received within 24 hours or they will be canceled. 
+ approval_event = ctx.wait_for_external_event("approval_received") + timeout_event = ctx.create_timer(timedelta(hours=24)) + winner = yield task.when_any([approval_event, timeout_event]) + if winner == timeout_event: + return "Cancelled" + + # The order was approved + yield ctx.call_activity(place_order, input=order) + approval_details = approval_event.get_result() + return f"Approved by '{approval_details.approver}'" + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Order purchasing workflow demo.") + parser.add_argument("--cost", type=int, default=2000, help="Cost of the order") + parser.add_argument("--approver", type=str, default="Me", help="Approver name") + parser.add_argument("--timeout", type=int, default=60, help="Timeout in seconds") + args = parser.parse_args() + + # configure and start the worker + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(purchase_order_workflow) + w.add_activity(send_approval_request) + w.add_activity(place_order) + w.start() + + c = client.TaskHubGrpcClient() + + # Start a purchase order workflow using the user input + order = Order(args.cost, "MyProduct", 1) + instance_id = c.schedule_new_orchestration(purchase_order_workflow, input=order) + + def prompt_for_approval(): + input("Press [ENTER] to approve the order...\n") + approval_event = namedtuple("Approval", ["approver"])(args.approver) + c.raise_orchestration_event(instance_id, "approval_received", data=approval_event) + + # Prompt the user for approval on a background thread + threading.Thread(target=prompt_for_approval, daemon=True).start() + + # Wait for the orchestration to complete + try: + state = c.wait_for_orchestration_completion(instance_id, timeout=args.timeout + 2) + if not state: + print("Workflow not found!") # not expected + elif state.runtime_status == client.OrchestrationStatus.COMPLETED: + print(f'Orchestration completed! 
Result: {state.serialized_output}') + else: + state.raise_if_failed() # raises an exception + except TimeoutError: + print("*** Orchestration timed out!") From 3439afc91b29a65dc82b881cf484bb386507e758 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Wed, 18 Jun 2025 12:14:25 +0200 Subject: [PATCH 29/81] Remove misleading entry in changelog The changelog entry removed mentions `azuremanaged`, and that's something we are not bringing from upstream. Signed-off-by: Albert Callarisa --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6921faa..376221e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) -- Added new `durabletask-azuremanaged` package for use with the [Durable Task Scheduler](https://learn.microsoft.com/azure/azure-functions/durable/durable-task-scheduler/durable-task-scheduler) - by [@RyanLettieri](https://github.com/RyanLettieri) ### Changes From 76033a2e2dbe680dbc7d6fcca3be447aee5f1415 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Wed, 18 Jun 2025 12:56:40 +0200 Subject: [PATCH 30/81] Fixed examples readme with specific dapr instructions Signed-off-by: Albert Callarisa --- examples/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/README.md b/examples/README.md index 404b127..a6cd847 100644 --- a/examples/README.md +++ b/examples/README.md @@ -8,7 +8,11 @@ All the examples assume that you have a Durable Task-compatible sidecar running 1. 
Install the latest version of the [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/), which contains and exposes an embedded version of the Durable Task engine. The setup process (which requires Docker) will configure the workflow engine to store state in a local Redis container. -2. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. +2. Run the [Durable Task Sidecar](https://github.com/dapr/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. + ```sh + go install github.com/dapr/durabletask-go@main + durabletask-go --port 4001 + ``` ## Running the examples From f9e2bf5d72963a8083b0f9c2dfb03c09e6bcec3c Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Mon, 30 Jun 2025 07:52:28 +0200 Subject: [PATCH 31/81] Use pinned grpcio-tools version For context: https://github.com/dapr/durabletask-python/pull/12#discussion_r2154292531 Signed-off-by: Albert Callarisa --- dev-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index b3ff6f7..119f072 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1 +1 @@ -grpcio-tools +grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 1.26.X is used which has breaking changes for Python From 6f12d1dad0c47ebaa8cf36e4ff85dafa7d6d9bb2 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Mon, 7 Jul 2025 14:20:18 +0200 Subject: [PATCH 32/81] fix tests Signed-off-by: Albert Callarisa --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 04ea774..8c4d1e4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,7 @@ local_scheme = "no-local-version" [tool.pytest.ini_options] minversion = "6.0" testpaths = ["tests"] +pythonpath = ["."] markers = [ "e2e: mark 
a test as an end-to-end test that requires a running sidecar" ] From 2719e59a50e7c8c10eb86ebecc70988fc1eb9ed4 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Fri, 5 Sep 2025 09:53:12 +0200 Subject: [PATCH 33/81] chore: regenerated protos using dapr fork Signed-off-by: Albert Callarisa --- Makefile | 5 +- durabletask/internal/PROTO_SOURCE_COMMIT_HASH | 2 +- .../internal/orchestrator_service_pb2.py | 458 +++++++------ .../internal/orchestrator_service_pb2.pyi | 227 +++++-- .../internal/orchestrator_service_pb2_grpc.py | 611 +++++++++++++++--- 5 files changed, 953 insertions(+), 350 deletions(-) diff --git a/Makefile b/Makefile index 5a05f33..3c1ed51 100644 --- a/Makefile +++ b/Makefile @@ -11,8 +11,9 @@ install: python3 -m pip install . gen-proto: - curl -o durabletask/internal/orchestrator_service.proto https://raw.githubusercontent.com/microsoft/durabletask-protobuf/refs/heads/main/protos/orchestrator_service.proto - curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/microsoft/durabletask-protobuf/commits?path=protos/orchestrator_service.proto&sha=main&per_page=1" | jq -r '.[0].sha' >> durabletask/internal/PROTO_SOURCE_COMMIT_HASH + curl -o durabletask/internal/orchestrator_service.proto https://raw.githubusercontent.com/dapr/durabletask-protobuf/refs/heads/main/protos/orchestrator_service.proto + curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/dapr/durabletask-protobuf/commits?path=protos/orchestrator_service.proto&sha=main&per_page=1" | jq -r '.[0].sha' > durabletask/internal/PROTO_SOURCE_COMMIT_HASH + pip install grpcio-tools==1.74.0 python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. 
./durabletask/internal/orchestrator_service.proto rm durabletask/internal/*.proto diff --git a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH index 90bb04b..80179d7 100644 --- a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH +++ b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH @@ -1 +1 @@ -c672a0dc97c06587d7399ee12f1c5b0b9fc492a7c672a0dc97c06587d7399ee12f1c5b0b9fc492a7 +4b86756497d875b97f9a91051781b5711c1e4fa6 diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py index 5efef70..ed91507 100644 --- a/durabletask/internal/orchestrator_service_pb2.py +++ b/durabletask/internal/orchestrator_service_pb2.py @@ -1,12 +1,22 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE # source: durabletask/internal/orchestrator_service.proto -# Protobuf Python Version: 4.25.1 +# Protobuf Python Version: 6.31.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 31, + 1, + '', + 'durabletask/internal/orchestrator_service.proto' +) # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -18,218 +28,248 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 
\x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 
\x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdc\x01\n\x1c\x45ntityOperationSignaledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xcb\x02\n\x1a\x45ntityOperationCalledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10parentInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x11parentExecutionId\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x07 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x90\x01\n\x18\x45ntityLockRequestedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x0f\n\x07lockSet\x18\x02 \x03(\t\x12\x10\n\x08position\x18\x03 \x01(\x05\x12\x36\n\x10parentInstanceId\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"`\n\x1d\x45ntityOperationCompletedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\\\n\x1a\x45ntityOperationFailedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xa2\x01\n\x15\x45ntityUnlockSentEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x36\n\x10parentInstanceId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x16\x45ntityLockGrantedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\"\xac\x0c\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r 
\x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x12@\n\x17\x65ntityOperationSignaled\x18\x17 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x18 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x42\n\x18\x65ntityOperationCompleted\x18\x19 \x01(\x0b\x32\x1e.EntityOperationCompletedEventH\x00\x12<\n\x15\x65ntityOperationFailed\x18\x1a \x01(\x0b\x32\x1b.EntityOperationFailedEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x1b \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x34\n\x11\x65ntityLockGranted\x18\x1c \x01(\x0b\x32\x17.EntityLockGrantedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x1d \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xfc\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\x12 \n\x18requiresHistoryStreaming\x18\x06 \x01(\x08\"\xd6\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 
\x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\x12\x37\n\x12numEventsProcessed\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\x95\x01\n\rEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x13\n\x0b\x65xecutionId\x18\x02 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12(\n\x11operationRequests\x18\x04 \x03(\x0b\x32\r.HistoryEvent\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb9\x01\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\x12$\n\x1cmaxConcurrentEntityWorkItems\x18\x03 
\x01(\x05\x12\'\n\x0c\x63\x61pabilities\x18\n \x03(\x0e\x32\x11.WorkerCapability\"\x8c\x02\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12)\n\x0f\x65ntityRequestV2\x18\x05 \x01(\x0b\x32\x0e.EntityRequestH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing\"\x84\x01\n\x1cStreamInstanceHistoryRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66orWorkItemProcessing\x18\x03 \x01(\x08\"-\n\x0cHistoryChunk\x12\x1d\n\x06\x65vents\x18\x01 \x03(\x0b\x32\r.HistoryEvent*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 
\n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02*^\n\x10WorkerCapability\x12!\n\x1dWORKER_CAPABILITY_UNSPECIFIED\x10\x00\x12\'\n#WORKER_CAPABILITY_HISTORY_STREAMING\x10\x01\x32\xc5\x0b\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12G\n\x15StreamInstanceHistory\x12\x1d.StreamInstanceHistoryRequest\x1a\r.HistoryChunk0\x01\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.S
ignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"K\n\nTaskRouter\x12\x13\n\x0bsourceAppID\x18\x01 \x01(\t\x12\x18\n\x0btargetAppID\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_targetAppID\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\x02\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\x12\x17\n\x0ftaskExecutionId\x18\x07 \x01(\t\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 
\x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xdd\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x12\n\x05\x61ppID\x18\x05 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_appID\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xe5\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\t \x03(\x0b\x32 .ExecutionStartedEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xc2\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\x12\x17\n\x0ftaskExecutionId\x18\x05 \x01(\t\"t\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0ftaskExecutionId\x18\x03 \x01(\t\"p\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0ftaskExecutionId\x18\x03 \x01(\t\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"[\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_name\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdc\x01\n\x1c\x45ntityOperationSignaledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xcb\x02\n\x1a\x45ntityOperationCalledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10parentInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x11parentExecutionId\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x90\x01\n\x18\x45ntityLockRequestedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x0f\n\x07lockSet\x18\x02 \x03(\t\x12\x10\n\x08position\x18\x03 \x01(\x05\x12\x36\n\x10parentInstanceId\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"`\n\x1d\x45ntityOperationCompletedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\\\n\x1a\x45ntityOperationFailedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xa2\x01\n\x15\x45ntityUnlockSentEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 
\x01(\t\x12\x36\n\x10parentInstanceId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x16\x45ntityLockGrantedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\"\xd9\x0c\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 
\x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x12@\n\x17\x65ntityOperationSignaled\x18\x17 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x18 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x42\n\x18\x65ntityOperationCompleted\x18\x19 \x01(\x0b\x32\x1e.EntityOperationCompletedEventH\x00\x12<\n\x15\x65ntityOperationFailed\x18\x1a \x01(\x0b\x32\x1b.EntityOperationFailedEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x1b \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x34\n\x11\x65ntityLockGranted\x18\x1c \x01(\x0b\x32\x17.EntityLockGrantedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x1d \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x12 \n\x06router\x18\x1e \x01(\x0b\x32\x0b.TaskRouterH\x01\x88\x01\x01\x42\x0b\n\teventTypeB\t\n\x07_router\"\xc4\x01\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12 \n\x06router\x18\x04 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x12\x17\n\x0ftaskExecutionId\x18\x05 \x01(\tB\t\n\x07_router\"\xc9\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12 \n\x06router\x18\x05 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x42\t\n\x07_router\"[\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_name\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 
\x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\x9c\x02\n\x17SendEntityMessageAction\x12@\n\x17\x65ntityOperationSignaled\x18\x01 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x02 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x03 \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x04 \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x42\x13\n\x11\x45ntityMessageType\"\xde\x03\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x12\x35\n\x11sendEntityMessage\x18\x08 \x01(\x0b\x32\x18.SendEntityMessageActionH\x00\x12 \n\x06router\x18\t \x01(\x0b\x32\x0b.TaskRouterH\x01\x88\x01\x01\x42\x18\n\x16orchestratorActionTypeB\t\n\x07_router\"\xa9\x02\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 
\x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\x12 \n\x18requiresHistoryStreaming\x18\x06 \x01(\x08\x12 \n\x06router\x18\x07 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x42\t\n\x07_router\"\xd6\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\x12\x37\n\x12numEventsProcessed\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\"\xce\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x12)\n\x12parentTraceContext\x18\t \x01(\x0b\x32\r.TraceContext\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 
\x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xfe\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x04tags\x18\x0f \x03(\x0b\x32\x1d.OrchestrationState.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"f\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\x12.\n\nisComplete\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xfa\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\x12&\n\x0eoperationInfos\x18\x06 \x03(\x0b\x32\x0e.OperationInfo\"\x95\x01\n\rEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x13\n\x0b\x65xecutionId\x18\x02 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12(\n\x11operationRequests\x18\x04 \x03(\x0b\x32\r.HistoryEvent\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"W\n\rOperationInfo\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x33\n\x13responseDestination\x18\x02 \x01(\x0b\x32\x16.OrchestrationInstance\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 
\x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"5\n\x1a\x41\x62\x61ndonActivityTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\x1d\n\x1b\x41\x62\x61ndonActivityTaskResponse\":\n\x1f\x41\x62\x61ndonOrchestrationTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\"\n AbandonOrchestrationTaskResponse\"3\n\x18\x41\x62\x61ndonEntityTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\x1b\n\x19\x41\x62\x61ndonEntityTaskResponse\"\xb9\x01\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\x12$\n\x1cmaxConcurrentEntityWorkItems\x18\x03 \x01(\x05\x12\'\n\x0c\x63\x61pabilities\x18\n \x03(\x0e\x32\x11.WorkerCapability\"\x8c\x02\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12)\n\x0f\x65ntityRequestV2\x18\x05 \x01(\x0b\x32\x0e.EntityRequestH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n 
\x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing\"\x84\x01\n\x1cStreamInstanceHistoryRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66orWorkItemProcessing\x18\x03 \x01(\x08\"-\n\x0cHistoryChunk\x12\x1d\n\x06\x65vents\x18\x01 \x03(\x0b\x32\r.HistoryEvent\"\xbd\x01\n\x1dRerunWorkflowFromEventRequest\x12\x18\n\x10sourceInstanceID\x18\x01 \x01(\t\x12\x0f\n\x07\x65ventID\x18\x02 \x01(\r\x12\x1a\n\rnewInstanceID\x18\x03 \x01(\tH\x00\x88\x01\x01\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x16\n\x0eoverwriteInput\x18\x05 \x01(\x08\x42\x10\n\x0e_newInstanceID\"7\n\x1eRerunWorkflowFromEventResponse\x12\x15\n\rnewInstanceID\x18\x01 \x01(\t*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 
\n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02*^\n\x10WorkerCapability\x12!\n\x1dWORKER_CAPABILITY_UNSPECIFIED\x10\x00\x12\'\n#WORKER_CAPABILITY_HISTORY_STREAMING\x10\x01\x32\xb6\x0e\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12G\n\x15StreamInstanceHistory\x12\x1d.StreamInstanceHistoryRequest\x1a\r.HistoryChunk0\x01\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.S
ignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponse\x12X\n\x1b\x41\x62\x61ndonTaskActivityWorkItem\x12\x1b.AbandonActivityTaskRequest\x1a\x1c.AbandonActivityTaskResponse\x12\x66\n\x1f\x41\x62\x61ndonTaskOrchestratorWorkItem\x12 .AbandonOrchestrationTaskRequest\x1a!.AbandonOrchestrationTaskResponse\x12R\n\x19\x41\x62\x61ndonTaskEntityWorkItem\x12\x19.AbandonEntityTaskRequest\x1a\x1a.AbandonEntityTaskResponse\x12Y\n\x16RerunWorkflowFromEvent\x12\x1e.RerunWorkflowFromEventRequest\x1a\x1f.RerunWorkflowFromEventResponseBV\n+io.dapr.durabletask.implementation.protobufZ\x0b/api/protos\xaa\x02\x19\x44\x61pr.DurableTask.Protobufb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'durabletask.internal.orchestrator_service_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - _globals['DESCRIPTOR']._options = None - _globals['DESCRIPTOR']._serialized_options = b'\n1com.microsoft.durabletask.implementation.protobufZ\020/internal/protos\252\002\036Microsoft.DurableTask.Protobuf' - _globals['_TRACECONTEXT'].fields_by_name['spanID']._options = None +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n+io.dapr.durabletask.implementation.protobufZ\013/api/protos\252\002\031Dapr.DurableTask.Protobuf' + _globals['_TRACECONTEXT'].fields_by_name['spanID']._loaded_options = None _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._options = None + _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._loaded_options = None + 
_globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_options = b'8\001' + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._loaded_options = None _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=14316 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=14625 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=14627 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=14692 - _globals['_WORKERCAPABILITY']._serialized_start=14694 - _globals['_WORKERCAPABILITY']._serialized_end=14788 - _globals['_ORCHESTRATIONINSTANCE']._serialized_start=177 - _globals['_ORCHESTRATIONINSTANCE']._serialized_end=271 - _globals['_ACTIVITYREQUEST']._serialized_start=274 - _globals['_ACTIVITYREQUEST']._serialized_end=511 - _globals['_ACTIVITYRESPONSE']._serialized_start=514 - _globals['_ACTIVITYRESPONSE']._serialized_end=684 - _globals['_TASKFAILUREDETAILS']._serialized_start=687 - _globals['_TASKFAILUREDETAILS']._serialized_end=865 - _globals['_PARENTINSTANCEINFO']._serialized_start=868 - _globals['_PARENTINSTANCEINFO']._serialized_end=1059 - _globals['_TRACECONTEXT']._serialized_start=1061 - _globals['_TRACECONTEXT']._serialized_end=1166 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1169 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1561 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1564 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1731 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1733 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1821 - _globals['_TASKSCHEDULEDEVENT']._serialized_start=1824 - _globals['_TASKSCHEDULEDEVENT']._serialized_end=1993 - _globals['_TASKCOMPLETEDEVENT']._serialized_start=1995 - _globals['_TASKCOMPLETEDEVENT']._serialized_end=2086 - _globals['_TASKFAILEDEVENT']._serialized_start=2088 - _globals['_TASKFAILEDEVENT']._serialized_end=2175 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2178 - 
_globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2385 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2387 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2498 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2500 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2607 - _globals['_TIMERCREATEDEVENT']._serialized_start=2609 - _globals['_TIMERCREATEDEVENT']._serialized_end=2672 - _globals['_TIMERFIREDEVENT']._serialized_start=2674 - _globals['_TIMERFIREDEVENT']._serialized_end=2752 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2754 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2780 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2782 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2810 - _globals['_EVENTSENTEVENT']._serialized_start=2812 - _globals['_EVENTSENTEVENT']._serialized_end=2907 - _globals['_EVENTRAISEDEVENT']._serialized_start=2909 - _globals['_EVENTRAISEDEVENT']._serialized_end=2986 - _globals['_GENERICEVENT']._serialized_start=2988 - _globals['_GENERICEVENT']._serialized_end=3046 - _globals['_HISTORYSTATEEVENT']._serialized_start=3048 - _globals['_HISTORYSTATEEVENT']._serialized_end=3116 - _globals['_CONTINUEASNEWEVENT']._serialized_start=3118 - _globals['_CONTINUEASNEWEVENT']._serialized_end=3183 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3185 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3255 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3257 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3325 - _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_start=3328 - _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_end=3548 - _globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_start=3551 - _globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_end=3882 - _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_start=3885 - _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_end=4029 - 
_globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_start=4031 - _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_end=4127 - _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_start=4129 - _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_end=4221 - _globals['_ENTITYUNLOCKSENTEVENT']._serialized_start=4224 - _globals['_ENTITYUNLOCKSENTEVENT']._serialized_end=4386 - _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_start=4388 - _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_end=4439 - _globals['_HISTORYEVENT']._serialized_start=4442 - _globals['_HISTORYEVENT']._serialized_end=6022 - _globals['_SCHEDULETASKACTION']._serialized_start=6024 - _globals['_SCHEDULETASKACTION']._serialized_end=6150 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=6153 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=6309 - _globals['_CREATETIMERACTION']._serialized_start=6311 - _globals['_CREATETIMERACTION']._serialized_end=6374 - _globals['_SENDEVENTACTION']._serialized_start=6376 - _globals['_SENDEVENTACTION']._serialized_end=6493 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=6496 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=6804 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=6806 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=6919 - _globals['_ORCHESTRATORACTION']._serialized_start=6922 - _globals['_ORCHESTRATORACTION']._serialized_end=7300 - _globals['_ORCHESTRATORREQUEST']._serialized_start=7303 - _globals['_ORCHESTRATORREQUEST']._serialized_end=7555 - _globals['_ORCHESTRATORRESPONSE']._serialized_start=7558 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=7772 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=7775 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=8194 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=8151 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=8194 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=8196 - 
_globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=8315 - _globals['_CREATEINSTANCERESPONSE']._serialized_start=8317 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=8361 - _globals['_GETINSTANCEREQUEST']._serialized_start=8363 - _globals['_GETINSTANCEREQUEST']._serialized_end=8432 - _globals['_GETINSTANCERESPONSE']._serialized_start=8434 - _globals['_GETINSTANCERESPONSE']._serialized_end=8520 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=8522 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=8611 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=8613 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=8637 - _globals['_ORCHESTRATIONSTATE']._serialized_start=8640 - _globals['_ORCHESTRATIONSTATE']._serialized_end=9316 - _globals['_RAISEEVENTREQUEST']._serialized_start=9318 - _globals['_RAISEEVENTREQUEST']._serialized_end=9416 - _globals['_RAISEEVENTRESPONSE']._serialized_start=9418 - _globals['_RAISEEVENTRESPONSE']._serialized_end=9438 - _globals['_TERMINATEREQUEST']._serialized_start=9440 - _globals['_TERMINATEREQUEST']._serialized_end=9543 - _globals['_TERMINATERESPONSE']._serialized_start=9545 - _globals['_TERMINATERESPONSE']._serialized_end=9564 - _globals['_SUSPENDREQUEST']._serialized_start=9566 - _globals['_SUSPENDREQUEST']._serialized_end=9648 - _globals['_SUSPENDRESPONSE']._serialized_start=9650 - _globals['_SUSPENDRESPONSE']._serialized_end=9667 - _globals['_RESUMEREQUEST']._serialized_start=9669 - _globals['_RESUMEREQUEST']._serialized_end=9750 - _globals['_RESUMERESPONSE']._serialized_start=9752 - _globals['_RESUMERESPONSE']._serialized_end=9768 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=9770 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=9824 - _globals['_INSTANCEQUERY']._serialized_start=9827 - _globals['_INSTANCEQUERY']._serialized_end=10213 - _globals['_QUERYINSTANCESRESPONSE']._serialized_start=10216 - _globals['_QUERYINSTANCESRESPONSE']._serialized_end=10346 - 
_globals['_PURGEINSTANCESREQUEST']._serialized_start=10349 - _globals['_PURGEINSTANCESREQUEST']._serialized_end=10477 - _globals['_PURGEINSTANCEFILTER']._serialized_start=10480 - _globals['_PURGEINSTANCEFILTER']._serialized_end=10650 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=10652 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=10706 - _globals['_CREATETASKHUBREQUEST']._serialized_start=10708 - _globals['_CREATETASKHUBREQUEST']._serialized_end=10756 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=10758 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=10781 - _globals['_DELETETASKHUBREQUEST']._serialized_start=10783 - _globals['_DELETETASKHUBREQUEST']._serialized_end=10805 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=10807 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=10830 - _globals['_SIGNALENTITYREQUEST']._serialized_start=10833 - _globals['_SIGNALENTITYREQUEST']._serialized_end=11003 - _globals['_SIGNALENTITYRESPONSE']._serialized_start=11005 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=11027 - _globals['_GETENTITYREQUEST']._serialized_start=11029 - _globals['_GETENTITYREQUEST']._serialized_end=11089 - _globals['_GETENTITYRESPONSE']._serialized_start=11091 - _globals['_GETENTITYRESPONSE']._serialized_end=11159 - _globals['_ENTITYQUERY']._serialized_start=11162 - _globals['_ENTITYQUERY']._serialized_end=11493 - _globals['_QUERYENTITIESREQUEST']._serialized_start=11495 - _globals['_QUERYENTITIESREQUEST']._serialized_end=11546 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=11548 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=11663 - _globals['_ENTITYMETADATA']._serialized_start=11666 - _globals['_ENTITYMETADATA']._serialized_end=11885 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=11888 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=12031 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=12034 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=12180 - 
_globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=12182 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=12275 - _globals['_ENTITYBATCHREQUEST']._serialized_start=12278 - _globals['_ENTITYBATCHREQUEST']._serialized_end=12408 - _globals['_ENTITYBATCHRESULT']._serialized_start=12411 - _globals['_ENTITYBATCHRESULT']._serialized_end=12596 - _globals['_ENTITYREQUEST']._serialized_start=12599 - _globals['_ENTITYREQUEST']._serialized_end=12748 - _globals['_OPERATIONREQUEST']._serialized_start=12750 - _globals['_OPERATIONREQUEST']._serialized_end=12851 - _globals['_OPERATIONRESULT']._serialized_start=12853 - _globals['_OPERATIONRESULT']._serialized_end=12972 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=12974 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=13044 - _globals['_OPERATIONRESULTFAILURE']._serialized_start=13046 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=13115 - _globals['_OPERATIONACTION']._serialized_start=13118 - _globals['_OPERATIONACTION']._serialized_end=13274 - _globals['_SENDSIGNALACTION']._serialized_start=13277 - _globals['_SENDSIGNALACTION']._serialized_end=13425 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=13428 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=13634 - _globals['_GETWORKITEMSREQUEST']._serialized_start=13637 - _globals['_GETWORKITEMSREQUEST']._serialized_end=13822 - _globals['_WORKITEM']._serialized_start=13825 - _globals['_WORKITEM']._serialized_end=14093 - _globals['_COMPLETETASKRESPONSE']._serialized_start=14095 - _globals['_COMPLETETASKRESPONSE']._serialized_end=14117 - _globals['_HEALTHPING']._serialized_start=14119 - _globals['_HEALTHPING']._serialized_end=14131 - _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_start=14134 - _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_end=14266 - _globals['_HISTORYCHUNK']._serialized_start=14268 - _globals['_HISTORYCHUNK']._serialized_end=14313 - _globals['_TASKHUBSIDECARSERVICE']._serialized_start=14791 - 
_globals['_TASKHUBSIDECARSERVICE']._serialized_end=16268 + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._loaded_options = None + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_options = b'8\001' + _globals['_ORCHESTRATIONSTATUS']._serialized_start=16113 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=16422 + _globals['_CREATEORCHESTRATIONACTION']._serialized_start=16424 + _globals['_CREATEORCHESTRATIONACTION']._serialized_end=16489 + _globals['_WORKERCAPABILITY']._serialized_start=16491 + _globals['_WORKERCAPABILITY']._serialized_end=16585 + _globals['_TASKROUTER']._serialized_start=177 + _globals['_TASKROUTER']._serialized_end=252 + _globals['_ORCHESTRATIONINSTANCE']._serialized_start=254 + _globals['_ORCHESTRATIONINSTANCE']._serialized_end=348 + _globals['_ACTIVITYREQUEST']._serialized_start=351 + _globals['_ACTIVITYREQUEST']._serialized_end=613 + _globals['_ACTIVITYRESPONSE']._serialized_start=616 + _globals['_ACTIVITYRESPONSE']._serialized_end=786 + _globals['_TASKFAILUREDETAILS']._serialized_start=789 + _globals['_TASKFAILUREDETAILS']._serialized_end=967 + _globals['_PARENTINSTANCEINFO']._serialized_start=970 + _globals['_PARENTINSTANCEINFO']._serialized_end=1191 + _globals['_TRACECONTEXT']._serialized_start=1193 + _globals['_TRACECONTEXT']._serialized_end=1298 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1301 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1786 + _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_start=1743 + _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_end=1786 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1789 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1956 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1958 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=2046 + _globals['_TASKSCHEDULEDEVENT']._serialized_start=2049 + _globals['_TASKSCHEDULEDEVENT']._serialized_end=2243 + _globals['_TASKCOMPLETEDEVENT']._serialized_start=2245 + 
_globals['_TASKCOMPLETEDEVENT']._serialized_end=2361 + _globals['_TASKFAILEDEVENT']._serialized_start=2363 + _globals['_TASKFAILEDEVENT']._serialized_end=2475 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2478 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2685 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2687 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2798 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2800 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2907 + _globals['_TIMERCREATEDEVENT']._serialized_start=2909 + _globals['_TIMERCREATEDEVENT']._serialized_end=3000 + _globals['_TIMERFIREDEVENT']._serialized_start=3002 + _globals['_TIMERFIREDEVENT']._serialized_end=3080 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=3082 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=3108 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=3110 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=3138 + _globals['_EVENTSENTEVENT']._serialized_start=3140 + _globals['_EVENTSENTEVENT']._serialized_end=3235 + _globals['_EVENTRAISEDEVENT']._serialized_start=3237 + _globals['_EVENTRAISEDEVENT']._serialized_end=3314 + _globals['_GENERICEVENT']._serialized_start=3316 + _globals['_GENERICEVENT']._serialized_end=3374 + _globals['_HISTORYSTATEEVENT']._serialized_start=3376 + _globals['_HISTORYSTATEEVENT']._serialized_end=3444 + _globals['_CONTINUEASNEWEVENT']._serialized_start=3446 + _globals['_CONTINUEASNEWEVENT']._serialized_end=3511 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3513 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3583 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3585 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3653 + _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_start=3656 + _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_end=3876 + 
_globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_start=3879 + _globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_end=4210 + _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_start=4213 + _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_end=4357 + _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_start=4359 + _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_end=4455 + _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_start=4457 + _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_end=4549 + _globals['_ENTITYUNLOCKSENTEVENT']._serialized_start=4552 + _globals['_ENTITYUNLOCKSENTEVENT']._serialized_end=4714 + _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_start=4716 + _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_end=4767 + _globals['_HISTORYEVENT']._serialized_start=4770 + _globals['_HISTORYEVENT']._serialized_end=6395 + _globals['_SCHEDULETASKACTION']._serialized_start=6398 + _globals['_SCHEDULETASKACTION']._serialized_end=6594 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=6597 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=6798 + _globals['_CREATETIMERACTION']._serialized_start=6800 + _globals['_CREATETIMERACTION']._serialized_end=6891 + _globals['_SENDEVENTACTION']._serialized_start=6893 + _globals['_SENDEVENTACTION']._serialized_end=7010 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=7013 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=7321 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=7323 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=7436 + _globals['_SENDENTITYMESSAGEACTION']._serialized_start=7439 + _globals['_SENDENTITYMESSAGEACTION']._serialized_end=7723 + _globals['_ORCHESTRATORACTION']._serialized_start=7726 + _globals['_ORCHESTRATORACTION']._serialized_end=8204 + _globals['_ORCHESTRATORREQUEST']._serialized_start=8207 + _globals['_ORCHESTRATORREQUEST']._serialized_end=8504 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=8507 + 
_globals['_ORCHESTRATORRESPONSE']._serialized_end=8721 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=8724 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=9186 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=1743 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=1786 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=9188 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=9307 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=9309 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=9353 + _globals['_GETINSTANCEREQUEST']._serialized_start=9355 + _globals['_GETINSTANCEREQUEST']._serialized_end=9424 + _globals['_GETINSTANCERESPONSE']._serialized_start=9426 + _globals['_GETINSTANCERESPONSE']._serialized_end=9512 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=9514 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=9603 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=9605 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=9629 + _globals['_ORCHESTRATIONSTATE']._serialized_start=9632 + _globals['_ORCHESTRATIONSTATE']._serialized_end=10398 + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_start=1743 + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_end=1786 + _globals['_RAISEEVENTREQUEST']._serialized_start=10400 + _globals['_RAISEEVENTREQUEST']._serialized_end=10498 + _globals['_RAISEEVENTRESPONSE']._serialized_start=10500 + _globals['_RAISEEVENTRESPONSE']._serialized_end=10520 + _globals['_TERMINATEREQUEST']._serialized_start=10522 + _globals['_TERMINATEREQUEST']._serialized_end=10625 + _globals['_TERMINATERESPONSE']._serialized_start=10627 + _globals['_TERMINATERESPONSE']._serialized_end=10646 + _globals['_SUSPENDREQUEST']._serialized_start=10648 + _globals['_SUSPENDREQUEST']._serialized_end=10730 + _globals['_SUSPENDRESPONSE']._serialized_start=10732 + _globals['_SUSPENDRESPONSE']._serialized_end=10749 + _globals['_RESUMEREQUEST']._serialized_start=10751 + 
_globals['_RESUMEREQUEST']._serialized_end=10832 + _globals['_RESUMERESPONSE']._serialized_start=10834 + _globals['_RESUMERESPONSE']._serialized_end=10850 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=10852 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=10906 + _globals['_INSTANCEQUERY']._serialized_start=10909 + _globals['_INSTANCEQUERY']._serialized_end=11295 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=11298 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=11428 + _globals['_PURGEINSTANCESREQUEST']._serialized_start=11431 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=11559 + _globals['_PURGEINSTANCEFILTER']._serialized_start=11562 + _globals['_PURGEINSTANCEFILTER']._serialized_end=11732 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=11734 + _globals['_PURGEINSTANCESRESPONSE']._serialized_end=11836 + _globals['_CREATETASKHUBREQUEST']._serialized_start=11838 + _globals['_CREATETASKHUBREQUEST']._serialized_end=11886 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=11888 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=11911 + _globals['_DELETETASKHUBREQUEST']._serialized_start=11913 + _globals['_DELETETASKHUBREQUEST']._serialized_end=11935 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=11937 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=11960 + _globals['_SIGNALENTITYREQUEST']._serialized_start=11963 + _globals['_SIGNALENTITYREQUEST']._serialized_end=12133 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=12135 + _globals['_SIGNALENTITYRESPONSE']._serialized_end=12157 + _globals['_GETENTITYREQUEST']._serialized_start=12159 + _globals['_GETENTITYREQUEST']._serialized_end=12219 + _globals['_GETENTITYRESPONSE']._serialized_start=12221 + _globals['_GETENTITYRESPONSE']._serialized_end=12289 + _globals['_ENTITYQUERY']._serialized_start=12292 + _globals['_ENTITYQUERY']._serialized_end=12623 + _globals['_QUERYENTITIESREQUEST']._serialized_start=12625 + 
_globals['_QUERYENTITIESREQUEST']._serialized_end=12676 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=12678 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=12793 + _globals['_ENTITYMETADATA']._serialized_start=12796 + _globals['_ENTITYMETADATA']._serialized_end=13015 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=13018 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=13161 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=13164 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=13310 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=13312 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=13405 + _globals['_ENTITYBATCHREQUEST']._serialized_start=13408 + _globals['_ENTITYBATCHREQUEST']._serialized_end=13538 + _globals['_ENTITYBATCHRESULT']._serialized_start=13541 + _globals['_ENTITYBATCHRESULT']._serialized_end=13791 + _globals['_ENTITYREQUEST']._serialized_start=13794 + _globals['_ENTITYREQUEST']._serialized_end=13943 + _globals['_OPERATIONREQUEST']._serialized_start=13945 + _globals['_OPERATIONREQUEST']._serialized_end=14046 + _globals['_OPERATIONRESULT']._serialized_start=14048 + _globals['_OPERATIONRESULT']._serialized_end=14167 + _globals['_OPERATIONINFO']._serialized_start=14169 + _globals['_OPERATIONINFO']._serialized_end=14256 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=14258 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=14328 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=14330 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=14399 + _globals['_OPERATIONACTION']._serialized_start=14402 + _globals['_OPERATIONACTION']._serialized_end=14558 + _globals['_SENDSIGNALACTION']._serialized_start=14561 + _globals['_SENDSIGNALACTION']._serialized_end=14709 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=14712 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=14918 + _globals['_ABANDONACTIVITYTASKREQUEST']._serialized_start=14920 + 
_globals['_ABANDONACTIVITYTASKREQUEST']._serialized_end=14973 + _globals['_ABANDONACTIVITYTASKRESPONSE']._serialized_start=14975 + _globals['_ABANDONACTIVITYTASKRESPONSE']._serialized_end=15004 + _globals['_ABANDONORCHESTRATIONTASKREQUEST']._serialized_start=15006 + _globals['_ABANDONORCHESTRATIONTASKREQUEST']._serialized_end=15064 + _globals['_ABANDONORCHESTRATIONTASKRESPONSE']._serialized_start=15066 + _globals['_ABANDONORCHESTRATIONTASKRESPONSE']._serialized_end=15100 + _globals['_ABANDONENTITYTASKREQUEST']._serialized_start=15102 + _globals['_ABANDONENTITYTASKREQUEST']._serialized_end=15153 + _globals['_ABANDONENTITYTASKRESPONSE']._serialized_start=15155 + _globals['_ABANDONENTITYTASKRESPONSE']._serialized_end=15182 + _globals['_GETWORKITEMSREQUEST']._serialized_start=15185 + _globals['_GETWORKITEMSREQUEST']._serialized_end=15370 + _globals['_WORKITEM']._serialized_start=15373 + _globals['_WORKITEM']._serialized_end=15641 + _globals['_COMPLETETASKRESPONSE']._serialized_start=15643 + _globals['_COMPLETETASKRESPONSE']._serialized_end=15665 + _globals['_HEALTHPING']._serialized_start=15667 + _globals['_HEALTHPING']._serialized_end=15679 + _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_start=15682 + _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_end=15814 + _globals['_HISTORYCHUNK']._serialized_start=15816 + _globals['_HISTORYCHUNK']._serialized_end=15861 + _globals['_RERUNWORKFLOWFROMEVENTREQUEST']._serialized_start=15864 + _globals['_RERUNWORKFLOWFROMEVENTREQUEST']._serialized_end=16053 + _globals['_RERUNWORKFLOWFROMEVENTRESPONSE']._serialized_start=16055 + _globals['_RERUNWORKFLOWFROMEVENTRESPONSE']._serialized_end=16110 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=16588 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=18434 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2.pyi b/durabletask/internal/orchestrator_service_pb2.pyi index 83d3d06..102e183 100644 --- 
a/durabletask/internal/orchestrator_service_pb2.pyi +++ b/durabletask/internal/orchestrator_service_pb2.pyi @@ -1,3 +1,5 @@ +import datetime + from google.protobuf import timestamp_pb2 as _timestamp_pb2 from google.protobuf import duration_pb2 as _duration_pb2 from google.protobuf import wrappers_pb2 as _wrappers_pb2 @@ -6,7 +8,8 @@ from google.protobuf.internal import containers as _containers from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor @@ -45,6 +48,14 @@ TERMINATE: CreateOrchestrationAction WORKER_CAPABILITY_UNSPECIFIED: WorkerCapability WORKER_CAPABILITY_HISTORY_STREAMING: WorkerCapability +class TaskRouter(_message.Message): + __slots__ = ("sourceAppID", "targetAppID") + SOURCEAPPID_FIELD_NUMBER: _ClassVar[int] + TARGETAPPID_FIELD_NUMBER: _ClassVar[int] + sourceAppID: str + targetAppID: str + def __init__(self, sourceAppID: _Optional[str] = ..., targetAppID: _Optional[str] = ...) -> None: ... + class OrchestrationInstance(_message.Message): __slots__ = ("instanceId", "executionId") INSTANCEID_FIELD_NUMBER: _ClassVar[int] @@ -54,20 +65,22 @@ class OrchestrationInstance(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
class ActivityRequest(_message.Message): - __slots__ = ("name", "version", "input", "orchestrationInstance", "taskId", "parentTraceContext") + __slots__ = ("name", "version", "input", "orchestrationInstance", "taskId", "parentTraceContext", "taskExecutionId") NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] ORCHESTRATIONINSTANCE_FIELD_NUMBER: _ClassVar[int] TASKID_FIELD_NUMBER: _ClassVar[int] PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue orchestrationInstance: OrchestrationInstance taskId: int parentTraceContext: TraceContext - def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., taskId: _Optional[int] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... + taskExecutionId: str + def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., taskId: _Optional[int] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... class ActivityResponse(_message.Message): __slots__ = ("instanceId", "taskId", "result", "failureDetails", "completionToken") @@ -98,16 +111,18 @@ class TaskFailureDetails(_message.Message): def __init__(self, errorType: _Optional[str] = ..., errorMessage: _Optional[str] = ..., stackTrace: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., innerFailure: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., isNonRetriable: bool = ...) 
-> None: ... class ParentInstanceInfo(_message.Message): - __slots__ = ("taskScheduledId", "name", "version", "orchestrationInstance") + __slots__ = ("taskScheduledId", "name", "version", "orchestrationInstance", "appID") TASKSCHEDULEDID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] ORCHESTRATIONINSTANCE_FIELD_NUMBER: _ClassVar[int] + APPID_FIELD_NUMBER: _ClassVar[int] taskScheduledId: int name: _wrappers_pb2.StringValue version: _wrappers_pb2.StringValue orchestrationInstance: OrchestrationInstance - def __init__(self, taskScheduledId: _Optional[int] = ..., name: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ...) -> None: ... + appID: str + def __init__(self, taskScheduledId: _Optional[int] = ..., name: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., appID: _Optional[str] = ...) -> None: ... class TraceContext(_message.Message): __slots__ = ("traceParent", "spanID", "traceState") @@ -120,7 +135,14 @@ class TraceContext(_message.Message): def __init__(self, traceParent: _Optional[str] = ..., spanID: _Optional[str] = ..., traceState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
class ExecutionStartedEvent(_message.Message): - __slots__ = ("name", "version", "input", "orchestrationInstance", "parentInstance", "scheduledStartTimestamp", "parentTraceContext", "orchestrationSpanID") + __slots__ = ("name", "version", "input", "orchestrationInstance", "parentInstance", "scheduledStartTimestamp", "parentTraceContext", "orchestrationSpanID", "tags") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] @@ -129,6 +151,7 @@ class ExecutionStartedEvent(_message.Message): SCHEDULEDSTARTTIMESTAMP_FIELD_NUMBER: _ClassVar[int] PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] ORCHESTRATIONSPANID_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue @@ -137,7 +160,8 @@ class ExecutionStartedEvent(_message.Message): scheduledStartTimestamp: _timestamp_pb2.Timestamp parentTraceContext: TraceContext orchestrationSpanID: _wrappers_pb2.StringValue - def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., parentInstance: _Optional[_Union[ParentInstanceInfo, _Mapping]] = ..., scheduledStartTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., orchestrationSpanID: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
+ tags: _containers.ScalarMap[str, str] + def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., parentInstance: _Optional[_Union[ParentInstanceInfo, _Mapping]] = ..., scheduledStartTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., orchestrationSpanID: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., tags: _Optional[_Mapping[str, str]] = ...) -> None: ... class ExecutionCompletedEvent(_message.Message): __slots__ = ("orchestrationStatus", "result", "failureDetails") @@ -158,32 +182,38 @@ class ExecutionTerminatedEvent(_message.Message): def __init__(self, input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., recurse: bool = ...) -> None: ... class TaskScheduledEvent(_message.Message): - __slots__ = ("name", "version", "input", "parentTraceContext") + __slots__ = ("name", "version", "input", "parentTraceContext", "taskExecutionId") NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue parentTraceContext: TraceContext - def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... 
+ taskExecutionId: str + def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... class TaskCompletedEvent(_message.Message): - __slots__ = ("taskScheduledId", "result") + __slots__ = ("taskScheduledId", "result", "taskExecutionId") TASKSCHEDULEDID_FIELD_NUMBER: _ClassVar[int] RESULT_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] taskScheduledId: int result: _wrappers_pb2.StringValue - def __init__(self, taskScheduledId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + taskExecutionId: str + def __init__(self, taskScheduledId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... class TaskFailedEvent(_message.Message): - __slots__ = ("taskScheduledId", "failureDetails") + __slots__ = ("taskScheduledId", "failureDetails", "taskExecutionId") TASKSCHEDULEDID_FIELD_NUMBER: _ClassVar[int] FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] taskScheduledId: int failureDetails: TaskFailureDetails - def __init__(self, taskScheduledId: _Optional[int] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... + taskExecutionId: str + def __init__(self, taskScheduledId: _Optional[int] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... 
class SubOrchestrationInstanceCreatedEvent(_message.Message): __slots__ = ("instanceId", "name", "version", "input", "parentTraceContext") @@ -216,10 +246,12 @@ class SubOrchestrationInstanceFailedEvent(_message.Message): def __init__(self, taskScheduledId: _Optional[int] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... class TimerCreatedEvent(_message.Message): - __slots__ = ("fireAt",) + __slots__ = ("fireAt", "name") FIREAT_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] fireAt: _timestamp_pb2.Timestamp - def __init__(self, fireAt: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + name: str + def __init__(self, fireAt: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., name: _Optional[str] = ...) -> None: ... class TimerFiredEvent(_message.Message): __slots__ = ("fireAt", "timerId") @@ -227,7 +259,7 @@ class TimerFiredEvent(_message.Message): TIMERID_FIELD_NUMBER: _ClassVar[int] fireAt: _timestamp_pb2.Timestamp timerId: int - def __init__(self, fireAt: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., timerId: _Optional[int] = ...) -> None: ... + def __init__(self, fireAt: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., timerId: _Optional[int] = ...) -> None: ... class OrchestratorStartedEvent(_message.Message): __slots__ = () @@ -297,7 +329,7 @@ class EntityOperationSignaledEvent(_message.Message): scheduledTime: _timestamp_pb2.Timestamp input: _wrappers_pb2.StringValue targetInstanceId: _wrappers_pb2.StringValue - def __init__(self, requestId: _Optional[str] = ..., operation: _Optional[str] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
+ def __init__(self, requestId: _Optional[str] = ..., operation: _Optional[str] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... class EntityOperationCalledEvent(_message.Message): __slots__ = ("requestId", "operation", "scheduledTime", "input", "parentInstanceId", "parentExecutionId", "targetInstanceId") @@ -315,7 +347,7 @@ class EntityOperationCalledEvent(_message.Message): parentInstanceId: _wrappers_pb2.StringValue parentExecutionId: _wrappers_pb2.StringValue targetInstanceId: _wrappers_pb2.StringValue - def __init__(self, requestId: _Optional[str] = ..., operation: _Optional[str] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentExecutionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + def __init__(self, requestId: _Optional[str] = ..., operation: _Optional[str] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentExecutionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... class EntityLockRequestedEvent(_message.Message): __slots__ = ("criticalSectionId", "lockSet", "position", "parentInstanceId") @@ -362,7 +394,7 @@ class EntityLockGrantedEvent(_message.Message): def __init__(self, criticalSectionId: _Optional[str] = ...) -> None: ... 
class HistoryEvent(_message.Message): - __slots__ = ("eventId", "timestamp", "executionStarted", "executionCompleted", "executionTerminated", "taskScheduled", "taskCompleted", "taskFailed", "subOrchestrationInstanceCreated", "subOrchestrationInstanceCompleted", "subOrchestrationInstanceFailed", "timerCreated", "timerFired", "orchestratorStarted", "orchestratorCompleted", "eventSent", "eventRaised", "genericEvent", "historyState", "continueAsNew", "executionSuspended", "executionResumed", "entityOperationSignaled", "entityOperationCalled", "entityOperationCompleted", "entityOperationFailed", "entityLockRequested", "entityLockGranted", "entityUnlockSent") + __slots__ = ("eventId", "timestamp", "executionStarted", "executionCompleted", "executionTerminated", "taskScheduled", "taskCompleted", "taskFailed", "subOrchestrationInstanceCreated", "subOrchestrationInstanceCompleted", "subOrchestrationInstanceFailed", "timerCreated", "timerFired", "orchestratorStarted", "orchestratorCompleted", "eventSent", "eventRaised", "genericEvent", "historyState", "continueAsNew", "executionSuspended", "executionResumed", "entityOperationSignaled", "entityOperationCalled", "entityOperationCompleted", "entityOperationFailed", "entityLockRequested", "entityLockGranted", "entityUnlockSent", "router") EVENTID_FIELD_NUMBER: _ClassVar[int] TIMESTAMP_FIELD_NUMBER: _ClassVar[int] EXECUTIONSTARTED_FIELD_NUMBER: _ClassVar[int] @@ -392,6 +424,7 @@ class HistoryEvent(_message.Message): ENTITYLOCKREQUESTED_FIELD_NUMBER: _ClassVar[int] ENTITYLOCKGRANTED_FIELD_NUMBER: _ClassVar[int] ENTITYUNLOCKSENT_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] eventId: int timestamp: _timestamp_pb2.Timestamp executionStarted: ExecutionStartedEvent @@ -421,35 +454,44 @@ class HistoryEvent(_message.Message): entityLockRequested: EntityLockRequestedEvent entityLockGranted: EntityLockGrantedEvent entityUnlockSent: EntityUnlockSentEvent - def __init__(self, eventId: _Optional[int] = ..., timestamp: 
_Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., executionStarted: _Optional[_Union[ExecutionStartedEvent, _Mapping]] = ..., executionCompleted: _Optional[_Union[ExecutionCompletedEvent, _Mapping]] = ..., executionTerminated: _Optional[_Union[ExecutionTerminatedEvent, _Mapping]] = ..., taskScheduled: _Optional[_Union[TaskScheduledEvent, _Mapping]] = ..., taskCompleted: _Optional[_Union[TaskCompletedEvent, _Mapping]] = ..., taskFailed: _Optional[_Union[TaskFailedEvent, _Mapping]] = ..., subOrchestrationInstanceCreated: _Optional[_Union[SubOrchestrationInstanceCreatedEvent, _Mapping]] = ..., subOrchestrationInstanceCompleted: _Optional[_Union[SubOrchestrationInstanceCompletedEvent, _Mapping]] = ..., subOrchestrationInstanceFailed: _Optional[_Union[SubOrchestrationInstanceFailedEvent, _Mapping]] = ..., timerCreated: _Optional[_Union[TimerCreatedEvent, _Mapping]] = ..., timerFired: _Optional[_Union[TimerFiredEvent, _Mapping]] = ..., orchestratorStarted: _Optional[_Union[OrchestratorStartedEvent, _Mapping]] = ..., orchestratorCompleted: _Optional[_Union[OrchestratorCompletedEvent, _Mapping]] = ..., eventSent: _Optional[_Union[EventSentEvent, _Mapping]] = ..., eventRaised: _Optional[_Union[EventRaisedEvent, _Mapping]] = ..., genericEvent: _Optional[_Union[GenericEvent, _Mapping]] = ..., historyState: _Optional[_Union[HistoryStateEvent, _Mapping]] = ..., continueAsNew: _Optional[_Union[ContinueAsNewEvent, _Mapping]] = ..., executionSuspended: _Optional[_Union[ExecutionSuspendedEvent, _Mapping]] = ..., executionResumed: _Optional[_Union[ExecutionResumedEvent, _Mapping]] = ..., entityOperationSignaled: _Optional[_Union[EntityOperationSignaledEvent, _Mapping]] = ..., entityOperationCalled: _Optional[_Union[EntityOperationCalledEvent, _Mapping]] = ..., entityOperationCompleted: _Optional[_Union[EntityOperationCompletedEvent, _Mapping]] = ..., entityOperationFailed: _Optional[_Union[EntityOperationFailedEvent, _Mapping]] = ..., entityLockRequested: 
_Optional[_Union[EntityLockRequestedEvent, _Mapping]] = ..., entityLockGranted: _Optional[_Union[EntityLockGrantedEvent, _Mapping]] = ..., entityUnlockSent: _Optional[_Union[EntityUnlockSentEvent, _Mapping]] = ...) -> None: ... + router: TaskRouter + def __init__(self, eventId: _Optional[int] = ..., timestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., executionStarted: _Optional[_Union[ExecutionStartedEvent, _Mapping]] = ..., executionCompleted: _Optional[_Union[ExecutionCompletedEvent, _Mapping]] = ..., executionTerminated: _Optional[_Union[ExecutionTerminatedEvent, _Mapping]] = ..., taskScheduled: _Optional[_Union[TaskScheduledEvent, _Mapping]] = ..., taskCompleted: _Optional[_Union[TaskCompletedEvent, _Mapping]] = ..., taskFailed: _Optional[_Union[TaskFailedEvent, _Mapping]] = ..., subOrchestrationInstanceCreated: _Optional[_Union[SubOrchestrationInstanceCreatedEvent, _Mapping]] = ..., subOrchestrationInstanceCompleted: _Optional[_Union[SubOrchestrationInstanceCompletedEvent, _Mapping]] = ..., subOrchestrationInstanceFailed: _Optional[_Union[SubOrchestrationInstanceFailedEvent, _Mapping]] = ..., timerCreated: _Optional[_Union[TimerCreatedEvent, _Mapping]] = ..., timerFired: _Optional[_Union[TimerFiredEvent, _Mapping]] = ..., orchestratorStarted: _Optional[_Union[OrchestratorStartedEvent, _Mapping]] = ..., orchestratorCompleted: _Optional[_Union[OrchestratorCompletedEvent, _Mapping]] = ..., eventSent: _Optional[_Union[EventSentEvent, _Mapping]] = ..., eventRaised: _Optional[_Union[EventRaisedEvent, _Mapping]] = ..., genericEvent: _Optional[_Union[GenericEvent, _Mapping]] = ..., historyState: _Optional[_Union[HistoryStateEvent, _Mapping]] = ..., continueAsNew: _Optional[_Union[ContinueAsNewEvent, _Mapping]] = ..., executionSuspended: _Optional[_Union[ExecutionSuspendedEvent, _Mapping]] = ..., executionResumed: _Optional[_Union[ExecutionResumedEvent, _Mapping]] = ..., entityOperationSignaled: 
_Optional[_Union[EntityOperationSignaledEvent, _Mapping]] = ..., entityOperationCalled: _Optional[_Union[EntityOperationCalledEvent, _Mapping]] = ..., entityOperationCompleted: _Optional[_Union[EntityOperationCompletedEvent, _Mapping]] = ..., entityOperationFailed: _Optional[_Union[EntityOperationFailedEvent, _Mapping]] = ..., entityLockRequested: _Optional[_Union[EntityLockRequestedEvent, _Mapping]] = ..., entityLockGranted: _Optional[_Union[EntityLockGrantedEvent, _Mapping]] = ..., entityUnlockSent: _Optional[_Union[EntityUnlockSentEvent, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... class ScheduleTaskAction(_message.Message): - __slots__ = ("name", "version", "input") + __slots__ = ("name", "version", "input", "router", "taskExecutionId") NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue - def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + router: TaskRouter + taskExecutionId: str + def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... 
class CreateSubOrchestrationAction(_message.Message): - __slots__ = ("instanceId", "name", "version", "input") + __slots__ = ("instanceId", "name", "version", "input", "router") INSTANCEID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] instanceId: str name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + router: TaskRouter + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... class CreateTimerAction(_message.Message): - __slots__ = ("fireAt",) + __slots__ = ("fireAt", "name") FIREAT_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] fireAt: _timestamp_pb2.Timestamp - def __init__(self, fireAt: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + name: str + def __init__(self, fireAt: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., name: _Optional[str] = ...) -> None: ... class SendEventAction(_message.Message): __slots__ = ("instance", "name", "data") @@ -487,8 +529,20 @@ class TerminateOrchestrationAction(_message.Message): recurse: bool def __init__(self, instanceId: _Optional[str] = ..., reason: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., recurse: bool = ...) -> None: ... 
+class SendEntityMessageAction(_message.Message): + __slots__ = ("entityOperationSignaled", "entityOperationCalled", "entityLockRequested", "entityUnlockSent") + ENTITYOPERATIONSIGNALED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONCALLED_FIELD_NUMBER: _ClassVar[int] + ENTITYLOCKREQUESTED_FIELD_NUMBER: _ClassVar[int] + ENTITYUNLOCKSENT_FIELD_NUMBER: _ClassVar[int] + entityOperationSignaled: EntityOperationSignaledEvent + entityOperationCalled: EntityOperationCalledEvent + entityLockRequested: EntityLockRequestedEvent + entityUnlockSent: EntityUnlockSentEvent + def __init__(self, entityOperationSignaled: _Optional[_Union[EntityOperationSignaledEvent, _Mapping]] = ..., entityOperationCalled: _Optional[_Union[EntityOperationCalledEvent, _Mapping]] = ..., entityLockRequested: _Optional[_Union[EntityLockRequestedEvent, _Mapping]] = ..., entityUnlockSent: _Optional[_Union[EntityUnlockSentEvent, _Mapping]] = ...) -> None: ... + class OrchestratorAction(_message.Message): - __slots__ = ("id", "scheduleTask", "createSubOrchestration", "createTimer", "sendEvent", "completeOrchestration", "terminateOrchestration") + __slots__ = ("id", "scheduleTask", "createSubOrchestration", "createTimer", "sendEvent", "completeOrchestration", "terminateOrchestration", "sendEntityMessage", "router") ID_FIELD_NUMBER: _ClassVar[int] SCHEDULETASK_FIELD_NUMBER: _ClassVar[int] CREATESUBORCHESTRATION_FIELD_NUMBER: _ClassVar[int] @@ -496,6 +550,8 @@ class OrchestratorAction(_message.Message): SENDEVENT_FIELD_NUMBER: _ClassVar[int] COMPLETEORCHESTRATION_FIELD_NUMBER: _ClassVar[int] TERMINATEORCHESTRATION_FIELD_NUMBER: _ClassVar[int] + SENDENTITYMESSAGE_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] id: int scheduleTask: ScheduleTaskAction createSubOrchestration: CreateSubOrchestrationAction @@ -503,23 +559,27 @@ class OrchestratorAction(_message.Message): sendEvent: SendEventAction completeOrchestration: CompleteOrchestrationAction terminateOrchestration: 
TerminateOrchestrationAction - def __init__(self, id: _Optional[int] = ..., scheduleTask: _Optional[_Union[ScheduleTaskAction, _Mapping]] = ..., createSubOrchestration: _Optional[_Union[CreateSubOrchestrationAction, _Mapping]] = ..., createTimer: _Optional[_Union[CreateTimerAction, _Mapping]] = ..., sendEvent: _Optional[_Union[SendEventAction, _Mapping]] = ..., completeOrchestration: _Optional[_Union[CompleteOrchestrationAction, _Mapping]] = ..., terminateOrchestration: _Optional[_Union[TerminateOrchestrationAction, _Mapping]] = ...) -> None: ... + sendEntityMessage: SendEntityMessageAction + router: TaskRouter + def __init__(self, id: _Optional[int] = ..., scheduleTask: _Optional[_Union[ScheduleTaskAction, _Mapping]] = ..., createSubOrchestration: _Optional[_Union[CreateSubOrchestrationAction, _Mapping]] = ..., createTimer: _Optional[_Union[CreateTimerAction, _Mapping]] = ..., sendEvent: _Optional[_Union[SendEventAction, _Mapping]] = ..., completeOrchestration: _Optional[_Union[CompleteOrchestrationAction, _Mapping]] = ..., terminateOrchestration: _Optional[_Union[TerminateOrchestrationAction, _Mapping]] = ..., sendEntityMessage: _Optional[_Union[SendEntityMessageAction, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... 
class OrchestratorRequest(_message.Message): - __slots__ = ("instanceId", "executionId", "pastEvents", "newEvents", "entityParameters", "requiresHistoryStreaming") + __slots__ = ("instanceId", "executionId", "pastEvents", "newEvents", "entityParameters", "requiresHistoryStreaming", "router") INSTANCEID_FIELD_NUMBER: _ClassVar[int] EXECUTIONID_FIELD_NUMBER: _ClassVar[int] PASTEVENTS_FIELD_NUMBER: _ClassVar[int] NEWEVENTS_FIELD_NUMBER: _ClassVar[int] ENTITYPARAMETERS_FIELD_NUMBER: _ClassVar[int] REQUIRESHISTORYSTREAMING_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] instanceId: str executionId: _wrappers_pb2.StringValue pastEvents: _containers.RepeatedCompositeFieldContainer[HistoryEvent] newEvents: _containers.RepeatedCompositeFieldContainer[HistoryEvent] entityParameters: OrchestratorEntityParameters requiresHistoryStreaming: bool - def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ..., requiresHistoryStreaming: bool = ...) -> None: ... + router: TaskRouter + def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ..., requiresHistoryStreaming: bool = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... 
class OrchestratorResponse(_message.Message): __slots__ = ("instanceId", "actions", "customStatus", "completionToken", "numEventsProcessed") @@ -536,7 +596,7 @@ class OrchestratorResponse(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completionToken: _Optional[str] = ..., numEventsProcessed: _Optional[_Union[_wrappers_pb2.Int32Value, _Mapping]] = ...) -> None: ... class CreateInstanceRequest(_message.Message): - __slots__ = ("instanceId", "name", "version", "input", "scheduledStartTimestamp", "orchestrationIdReusePolicy", "executionId", "tags") + __slots__ = ("instanceId", "name", "version", "input", "scheduledStartTimestamp", "orchestrationIdReusePolicy", "executionId", "tags", "parentTraceContext") class TagsEntry(_message.Message): __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] @@ -552,6 +612,7 @@ class CreateInstanceRequest(_message.Message): ORCHESTRATIONIDREUSEPOLICY_FIELD_NUMBER: _ClassVar[int] EXECUTIONID_FIELD_NUMBER: _ClassVar[int] TAGS_FIELD_NUMBER: _ClassVar[int] + PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] instanceId: str name: str version: _wrappers_pb2.StringValue @@ -560,7 +621,8 @@ class CreateInstanceRequest(_message.Message): orchestrationIdReusePolicy: OrchestrationIdReusePolicy executionId: _wrappers_pb2.StringValue tags: _containers.ScalarMap[str, str] - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledStartTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., orchestrationIdReusePolicy: _Optional[_Union[OrchestrationIdReusePolicy, _Mapping]] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., tags: _Optional[_Mapping[str, 
str]] = ...) -> None: ... + parentTraceContext: TraceContext + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledStartTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., orchestrationIdReusePolicy: _Optional[_Union[OrchestrationIdReusePolicy, _Mapping]] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., tags: _Optional[_Mapping[str, str]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... class OrchestrationIdReusePolicy(_message.Message): __slots__ = ("operationStatus", "action") @@ -605,7 +667,14 @@ class RewindInstanceResponse(_message.Message): def __init__(self) -> None: ... class OrchestrationState(_message.Message): - __slots__ = ("instanceId", "name", "version", "orchestrationStatus", "scheduledStartTimestamp", "createdTimestamp", "lastUpdatedTimestamp", "input", "output", "customStatus", "failureDetails", "executionId", "completedTimestamp", "parentInstanceId") + __slots__ = ("instanceId", "name", "version", "orchestrationStatus", "scheduledStartTimestamp", "createdTimestamp", "lastUpdatedTimestamp", "input", "output", "customStatus", "failureDetails", "executionId", "completedTimestamp", "parentInstanceId", "tags") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... 
INSTANCEID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] @@ -620,6 +689,7 @@ class OrchestrationState(_message.Message): EXECUTIONID_FIELD_NUMBER: _ClassVar[int] COMPLETEDTIMESTAMP_FIELD_NUMBER: _ClassVar[int] PARENTINSTANCEID_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] instanceId: str name: str version: _wrappers_pb2.StringValue @@ -634,7 +704,8 @@ class OrchestrationState(_message.Message): executionId: _wrappers_pb2.StringValue completedTimestamp: _timestamp_pb2.Timestamp parentInstanceId: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationStatus: _Optional[_Union[OrchestrationStatus, str]] = ..., scheduledStartTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., lastUpdatedTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., output: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completedTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
+ tags: _containers.ScalarMap[str, str] + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationStatus: _Optional[_Union[OrchestrationStatus, str]] = ..., scheduledStartTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., lastUpdatedTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., output: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completedTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., tags: _Optional[_Mapping[str, str]] = ...) -> None: ... 
class RaiseEventRequest(_message.Message): __slots__ = ("instanceId", "name", "input") @@ -712,7 +783,7 @@ class InstanceQuery(_message.Message): continuationToken: _wrappers_pb2.StringValue instanceIdPrefix: _wrappers_pb2.StringValue fetchInputsAndOutputs: bool - def __init__(self, runtimeStatus: _Optional[_Iterable[_Union[OrchestrationStatus, str]]] = ..., createdTimeFrom: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimeTo: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., taskHubNames: _Optional[_Iterable[_Union[_wrappers_pb2.StringValue, _Mapping]]] = ..., maxInstanceCount: _Optional[int] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., instanceIdPrefix: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., fetchInputsAndOutputs: bool = ...) -> None: ... + def __init__(self, runtimeStatus: _Optional[_Iterable[_Union[OrchestrationStatus, str]]] = ..., createdTimeFrom: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimeTo: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., taskHubNames: _Optional[_Iterable[_Union[_wrappers_pb2.StringValue, _Mapping]]] = ..., maxInstanceCount: _Optional[int] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., instanceIdPrefix: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., fetchInputsAndOutputs: bool = ...) -> None: ... 
class QueryInstancesResponse(_message.Message): __slots__ = ("orchestrationState", "continuationToken") @@ -740,13 +811,15 @@ class PurgeInstanceFilter(_message.Message): createdTimeFrom: _timestamp_pb2.Timestamp createdTimeTo: _timestamp_pb2.Timestamp runtimeStatus: _containers.RepeatedScalarFieldContainer[OrchestrationStatus] - def __init__(self, createdTimeFrom: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimeTo: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., runtimeStatus: _Optional[_Iterable[_Union[OrchestrationStatus, str]]] = ...) -> None: ... + def __init__(self, createdTimeFrom: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimeTo: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., runtimeStatus: _Optional[_Iterable[_Union[OrchestrationStatus, str]]] = ...) -> None: ... class PurgeInstancesResponse(_message.Message): - __slots__ = ("deletedInstanceCount",) + __slots__ = ("deletedInstanceCount", "isComplete") DELETEDINSTANCECOUNT_FIELD_NUMBER: _ClassVar[int] + ISCOMPLETE_FIELD_NUMBER: _ClassVar[int] deletedInstanceCount: int - def __init__(self, deletedInstanceCount: _Optional[int] = ...) -> None: ... + isComplete: _wrappers_pb2.BoolValue + def __init__(self, deletedInstanceCount: _Optional[int] = ..., isComplete: _Optional[_Union[_wrappers_pb2.BoolValue, _Mapping]] = ...) -> None: ... class CreateTaskHubRequest(_message.Message): __slots__ = ("recreateIfExists",) @@ -778,7 +851,7 @@ class SignalEntityRequest(_message.Message): input: _wrappers_pb2.StringValue requestId: str scheduledTime: _timestamp_pb2.Timestamp - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., requestId: _Optional[str] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... 
+ def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., requestId: _Optional[str] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... class SignalEntityResponse(_message.Message): __slots__ = () @@ -816,7 +889,7 @@ class EntityQuery(_message.Message): includeTransient: bool pageSize: _wrappers_pb2.Int32Value continuationToken: _wrappers_pb2.StringValue - def __init__(self, instanceIdStartsWith: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., lastModifiedFrom: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., lastModifiedTo: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., includeState: bool = ..., includeTransient: bool = ..., pageSize: _Optional[_Union[_wrappers_pb2.Int32Value, _Mapping]] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + def __init__(self, instanceIdStartsWith: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., lastModifiedFrom: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., lastModifiedTo: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., includeState: bool = ..., includeTransient: bool = ..., pageSize: _Optional[_Union[_wrappers_pb2.Int32Value, _Mapping]] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
class QueryEntitiesRequest(_message.Message): __slots__ = ("query",) @@ -844,7 +917,7 @@ class EntityMetadata(_message.Message): backlogQueueSize: int lockedBy: _wrappers_pb2.StringValue serializedState: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., lastModifiedTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., backlogQueueSize: _Optional[int] = ..., lockedBy: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., serializedState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + def __init__(self, instanceId: _Optional[str] = ..., lastModifiedTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., backlogQueueSize: _Optional[int] = ..., lockedBy: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., serializedState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... class CleanEntityStorageRequest(_message.Message): __slots__ = ("continuationToken", "removeEmptyEntities", "releaseOrphanedLocks") @@ -870,7 +943,7 @@ class OrchestratorEntityParameters(_message.Message): __slots__ = ("entityMessageReorderWindow",) ENTITYMESSAGEREORDERWINDOW_FIELD_NUMBER: _ClassVar[int] entityMessageReorderWindow: _duration_pb2.Duration - def __init__(self, entityMessageReorderWindow: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ...) -> None: ... + def __init__(self, entityMessageReorderWindow: _Optional[_Union[datetime.timedelta, _duration_pb2.Duration, _Mapping]] = ...) -> None: ... class EntityBatchRequest(_message.Message): __slots__ = ("instanceId", "entityState", "operations") @@ -883,16 +956,20 @@ class EntityBatchRequest(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., entityState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., operations: _Optional[_Iterable[_Union[OperationRequest, _Mapping]]] = ...) -> None: ... 
class EntityBatchResult(_message.Message): - __slots__ = ("results", "actions", "entityState", "failureDetails") + __slots__ = ("results", "actions", "entityState", "failureDetails", "completionToken", "operationInfos") RESULTS_FIELD_NUMBER: _ClassVar[int] ACTIONS_FIELD_NUMBER: _ClassVar[int] ENTITYSTATE_FIELD_NUMBER: _ClassVar[int] FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + OPERATIONINFOS_FIELD_NUMBER: _ClassVar[int] results: _containers.RepeatedCompositeFieldContainer[OperationResult] actions: _containers.RepeatedCompositeFieldContainer[OperationAction] entityState: _wrappers_pb2.StringValue failureDetails: TaskFailureDetails - def __init__(self, results: _Optional[_Iterable[_Union[OperationResult, _Mapping]]] = ..., actions: _Optional[_Iterable[_Union[OperationAction, _Mapping]]] = ..., entityState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... + completionToken: str + operationInfos: _containers.RepeatedCompositeFieldContainer[OperationInfo] + def __init__(self, results: _Optional[_Iterable[_Union[OperationResult, _Mapping]]] = ..., actions: _Optional[_Iterable[_Union[OperationAction, _Mapping]]] = ..., entityState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., completionToken: _Optional[str] = ..., operationInfos: _Optional[_Iterable[_Union[OperationInfo, _Mapping]]] = ...) -> None: ... class EntityRequest(_message.Message): __slots__ = ("instanceId", "executionId", "entityState", "operationRequests") @@ -924,6 +1001,14 @@ class OperationResult(_message.Message): failure: OperationResultFailure def __init__(self, success: _Optional[_Union[OperationResultSuccess, _Mapping]] = ..., failure: _Optional[_Union[OperationResultFailure, _Mapping]] = ...) -> None: ... 
+class OperationInfo(_message.Message): + __slots__ = ("requestId", "responseDestination") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + RESPONSEDESTINATION_FIELD_NUMBER: _ClassVar[int] + requestId: str + responseDestination: OrchestrationInstance + def __init__(self, requestId: _Optional[str] = ..., responseDestination: _Optional[_Union[OrchestrationInstance, _Mapping]] = ...) -> None: ... + class OperationResultSuccess(_message.Message): __slots__ = ("result",) RESULT_FIELD_NUMBER: _ClassVar[int] @@ -956,7 +1041,7 @@ class SendSignalAction(_message.Message): name: str input: _wrappers_pb2.StringValue scheduledTime: _timestamp_pb2.Timestamp - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... class StartNewOrchestrationAction(_message.Message): __slots__ = ("instanceId", "name", "version", "input", "scheduledTime") @@ -970,7 +1055,37 @@ class StartNewOrchestrationAction(_message.Message): version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue scheduledTime: _timestamp_pb2.Timestamp - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... 
+ def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + +class AbandonActivityTaskRequest(_message.Message): + __slots__ = ("completionToken",) + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + completionToken: str + def __init__(self, completionToken: _Optional[str] = ...) -> None: ... + +class AbandonActivityTaskResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AbandonOrchestrationTaskRequest(_message.Message): + __slots__ = ("completionToken",) + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + completionToken: str + def __init__(self, completionToken: _Optional[str] = ...) -> None: ... + +class AbandonOrchestrationTaskResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AbandonEntityTaskRequest(_message.Message): + __slots__ = ("completionToken",) + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + completionToken: str + def __init__(self, completionToken: _Optional[str] = ...) -> None: ... + +class AbandonEntityTaskResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... class GetWorkItemsRequest(_message.Message): __slots__ = ("maxConcurrentOrchestrationWorkItems", "maxConcurrentActivityWorkItems", "maxConcurrentEntityWorkItems", "capabilities") @@ -1023,3 +1138,23 @@ class HistoryChunk(_message.Message): EVENTS_FIELD_NUMBER: _ClassVar[int] events: _containers.RepeatedCompositeFieldContainer[HistoryEvent] def __init__(self, events: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ...) -> None: ... 
+ +class RerunWorkflowFromEventRequest(_message.Message): + __slots__ = ("sourceInstanceID", "eventID", "newInstanceID", "input", "overwriteInput") + SOURCEINSTANCEID_FIELD_NUMBER: _ClassVar[int] + EVENTID_FIELD_NUMBER: _ClassVar[int] + NEWINSTANCEID_FIELD_NUMBER: _ClassVar[int] + INPUT_FIELD_NUMBER: _ClassVar[int] + OVERWRITEINPUT_FIELD_NUMBER: _ClassVar[int] + sourceInstanceID: str + eventID: int + newInstanceID: str + input: _wrappers_pb2.StringValue + overwriteInput: bool + def __init__(self, sourceInstanceID: _Optional[str] = ..., eventID: _Optional[int] = ..., newInstanceID: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., overwriteInput: bool = ...) -> None: ... + +class RerunWorkflowFromEventResponse(_message.Message): + __slots__ = ("newInstanceID",) + NEWINSTANCEID_FIELD_NUMBER: _ClassVar[int] + newInstanceID: str + def __init__(self, newInstanceID: _Optional[str] = ...) -> None: ... diff --git a/durabletask/internal/orchestrator_service_pb2_grpc.py b/durabletask/internal/orchestrator_service_pb2_grpc.py index ea61301..7c12e1b 100644 --- a/durabletask/internal/orchestrator_service_pb2_grpc.py +++ b/durabletask/internal/orchestrator_service_pb2_grpc.py @@ -1,10 +1,30 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
"""Client and server classes corresponding to protobuf-defined services.""" import grpc +import warnings from durabletask.internal import orchestrator_service_pb2 as durabletask_dot_internal_dot_orchestrator__service__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +GRPC_GENERATED_VERSION = '1.74.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in durabletask/internal/orchestrator_service_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
+ ) + class TaskHubSidecarServiceStub(object): """Missing associated documentation comment in .proto file.""" @@ -19,117 +39,137 @@ def __init__(self, channel): '/TaskHubSidecarService/Hello', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) + _registered_method=True) self.StartInstance = channel.unary_unary( '/TaskHubSidecarService/StartInstance', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, - ) + _registered_method=True) self.GetInstance = channel.unary_unary( '/TaskHubSidecarService/GetInstance', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, - ) + _registered_method=True) self.RewindInstance = channel.unary_unary( '/TaskHubSidecarService/RewindInstance', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, - ) + _registered_method=True) self.WaitForInstanceStart = channel.unary_unary( '/TaskHubSidecarService/WaitForInstanceStart', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, - ) + _registered_method=True) self.WaitForInstanceCompletion = channel.unary_unary( '/TaskHubSidecarService/WaitForInstanceCompletion', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, 
response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, - ) + _registered_method=True) self.RaiseEvent = channel.unary_unary( '/TaskHubSidecarService/RaiseEvent', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, - ) + _registered_method=True) self.TerminateInstance = channel.unary_unary( '/TaskHubSidecarService/TerminateInstance', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, - ) + _registered_method=True) self.SuspendInstance = channel.unary_unary( '/TaskHubSidecarService/SuspendInstance', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, - ) + _registered_method=True) self.ResumeInstance = channel.unary_unary( '/TaskHubSidecarService/ResumeInstance', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, - ) + _registered_method=True) self.QueryInstances = channel.unary_unary( '/TaskHubSidecarService/QueryInstances', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, - ) + _registered_method=True) self.PurgeInstances = channel.unary_unary( '/TaskHubSidecarService/PurgeInstances', 
request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, - ) + _registered_method=True) self.GetWorkItems = channel.unary_stream( '/TaskHubSidecarService/GetWorkItems', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, - ) + _registered_method=True) self.CompleteActivityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteActivityTask', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, - ) + _registered_method=True) self.CompleteOrchestratorTask = channel.unary_unary( '/TaskHubSidecarService/CompleteOrchestratorTask', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, - ) + _registered_method=True) self.CompleteEntityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteEntityTask', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, - ) + _registered_method=True) self.StreamInstanceHistory = channel.unary_stream( '/TaskHubSidecarService/StreamInstanceHistory', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.StreamInstanceHistoryRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.HistoryChunk.FromString, - ) + _registered_method=True) 
self.CreateTaskHub = channel.unary_unary( '/TaskHubSidecarService/CreateTaskHub', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, - ) + _registered_method=True) self.DeleteTaskHub = channel.unary_unary( '/TaskHubSidecarService/DeleteTaskHub', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, - ) + _registered_method=True) self.SignalEntity = channel.unary_unary( '/TaskHubSidecarService/SignalEntity', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, - ) + _registered_method=True) self.GetEntity = channel.unary_unary( '/TaskHubSidecarService/GetEntity', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, - ) + _registered_method=True) self.QueryEntities = channel.unary_unary( '/TaskHubSidecarService/QueryEntities', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, - ) + _registered_method=True) self.CleanEntityStorage = channel.unary_unary( '/TaskHubSidecarService/CleanEntityStorage', request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, 
response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, - ) + _registered_method=True) + self.AbandonTaskActivityWorkItem = channel.unary_unary( + '/TaskHubSidecarService/AbandonTaskActivityWorkItem', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskResponse.FromString, + _registered_method=True) + self.AbandonTaskOrchestratorWorkItem = channel.unary_unary( + '/TaskHubSidecarService/AbandonTaskOrchestratorWorkItem', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskResponse.FromString, + _registered_method=True) + self.AbandonTaskEntityWorkItem = channel.unary_unary( + '/TaskHubSidecarService/AbandonTaskEntityWorkItem', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskResponse.FromString, + _registered_method=True) + self.RerunWorkflowFromEvent = channel.unary_unary( + '/TaskHubSidecarService/RerunWorkflowFromEvent', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventResponse.FromString, + _registered_method=True) class TaskHubSidecarServiceServicer(object): @@ -292,6 +332,34 @@ def CleanEntityStorage(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def AbandonTaskActivityWorkItem(self, request, context): + """Abandons a single work item + 
""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AbandonTaskOrchestratorWorkItem(self, request, context): + """Abandon an orchestration work item + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AbandonTaskEntityWorkItem(self, request, context): + """Abandon an entity work item + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RerunWorkflowFromEvent(self, request, context): + """Rerun a Workflow from a specific event ID of a workflow instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_TaskHubSidecarServiceServicer_to_server(servicer, server): rpc_method_handlers = { @@ -410,10 +478,31 @@ def add_TaskHubSidecarServiceServicer_to_server(servicer, server): request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.FromString, response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.SerializeToString, ), + 'AbandonTaskActivityWorkItem': grpc.unary_unary_rpc_method_handler( + servicer.AbandonTaskActivityWorkItem, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskResponse.SerializeToString, + ), + 'AbandonTaskOrchestratorWorkItem': grpc.unary_unary_rpc_method_handler( + servicer.AbandonTaskOrchestratorWorkItem, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskRequest.FromString, + 
response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskResponse.SerializeToString, + ), + 'AbandonTaskEntityWorkItem': grpc.unary_unary_rpc_method_handler( + servicer.AbandonTaskEntityWorkItem, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskResponse.SerializeToString, + ), + 'RerunWorkflowFromEvent': grpc.unary_unary_rpc_method_handler( + servicer.RerunWorkflowFromEvent, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'TaskHubSidecarService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('TaskHubSidecarService', rpc_method_handlers) # This class is part of an EXPERIMENTAL API. 
@@ -431,11 +520,21 @@ def Hello(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/Hello', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/Hello', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def StartInstance(request, @@ -448,11 +547,21 @@ def StartInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/StartInstance', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/StartInstance', durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def GetInstance(request, @@ -465,11 +574,21 @@ def GetInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/GetInstance', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/GetInstance', durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, - options, channel_credentials, - insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def RewindInstance(request, @@ -482,11 +601,21 @@ def RewindInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/RewindInstance', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/RewindInstance', durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def WaitForInstanceStart(request, @@ -499,11 +628,21 @@ def WaitForInstanceStart(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/WaitForInstanceStart', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/WaitForInstanceStart', durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def WaitForInstanceCompletion(request, @@ -516,11 +655,21 @@ def WaitForInstanceCompletion(request, wait_for_ready=None, timeout=None, metadata=None): - return 
grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/WaitForInstanceCompletion', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/WaitForInstanceCompletion', durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def RaiseEvent(request, @@ -533,11 +682,21 @@ def RaiseEvent(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/RaiseEvent', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/RaiseEvent', durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def TerminateInstance(request, @@ -550,11 +709,21 @@ def TerminateInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/TerminateInstance', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/TerminateInstance', durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, - options, channel_credentials, - insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def SuspendInstance(request, @@ -567,11 +736,21 @@ def SuspendInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/SuspendInstance', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/SuspendInstance', durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def ResumeInstance(request, @@ -584,11 +763,21 @@ def ResumeInstance(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/ResumeInstance', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/ResumeInstance', durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def QueryInstances(request, @@ -601,11 +790,21 @@ def QueryInstances(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, 
'/TaskHubSidecarService/QueryInstances', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/QueryInstances', durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def PurgeInstances(request, @@ -618,11 +817,21 @@ def PurgeInstances(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/PurgeInstances', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/PurgeInstances', durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def GetWorkItems(request, @@ -635,11 +844,21 @@ def GetWorkItems(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream(request, target, '/TaskHubSidecarService/GetWorkItems', + return grpc.experimental.unary_stream( + request, + target, + '/TaskHubSidecarService/GetWorkItems', durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, 
metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def CompleteActivityTask(request, @@ -652,11 +871,21 @@ def CompleteActivityTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteActivityTask', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/CompleteActivityTask', durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def CompleteOrchestratorTask(request, @@ -669,11 +898,21 @@ def CompleteOrchestratorTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CompleteOrchestratorTask', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/CompleteOrchestratorTask', durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def CompleteEntityTask(request, @@ -686,11 +925,21 @@ def CompleteEntityTask(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, 
target, '/TaskHubSidecarService/CompleteEntityTask', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/CompleteEntityTask', durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def StreamInstanceHistory(request, @@ -703,11 +952,21 @@ def StreamInstanceHistory(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream(request, target, '/TaskHubSidecarService/StreamInstanceHistory', + return grpc.experimental.unary_stream( + request, + target, + '/TaskHubSidecarService/StreamInstanceHistory', durabletask_dot_internal_dot_orchestrator__service__pb2.StreamInstanceHistoryRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.HistoryChunk.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def CreateTaskHub(request, @@ -720,11 +979,21 @@ def CreateTaskHub(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CreateTaskHub', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/CreateTaskHub', durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, - options, channel_credentials, - insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def DeleteTaskHub(request, @@ -737,11 +1006,21 @@ def DeleteTaskHub(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/DeleteTaskHub', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/DeleteTaskHub', durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def SignalEntity(request, @@ -754,11 +1033,21 @@ def SignalEntity(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/SignalEntity', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/SignalEntity', durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def GetEntity(request, @@ -771,11 +1060,21 @@ def GetEntity(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, 
'/TaskHubSidecarService/GetEntity', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/GetEntity', durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def QueryEntities(request, @@ -788,11 +1087,21 @@ def QueryEntities(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/QueryEntities', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/QueryEntities', durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def CleanEntityStorage(request, @@ -805,8 +1114,126 @@ def CleanEntityStorage(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/TaskHubSidecarService/CleanEntityStorage', + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/CleanEntityStorage', durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, 
wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AbandonTaskActivityWorkItem(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/AbandonTaskActivityWorkItem', + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AbandonTaskOrchestratorWorkItem(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/AbandonTaskOrchestratorWorkItem', + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AbandonTaskEntityWorkItem(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/AbandonTaskEntityWorkItem', + 
durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def RerunWorkflowFromEvent(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/RerunWorkflowFromEvent', + durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) From 849eddce30c48b59f461cb3772531a8fd362e7b6 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Mon, 8 Sep 2025 17:14:04 +0200 Subject: [PATCH 34/81] feat: Adds support for cross-app calls. 
Signed-off-by: Albert Callarisa --- durabletask/internal/helpers.py | 33 +++-- durabletask/task.py | 10 +- durabletask/worker.py | 38 +++++- .../test_orchestration_executor.py | 128 ++++++++++++++++++ 4 files changed, 191 insertions(+), 18 deletions(-) diff --git a/durabletask/internal/helpers.py b/durabletask/internal/helpers.py index 6b36586..48ab14b 100644 --- a/durabletask/internal/helpers.py +++ b/durabletask/internal/helpers.py @@ -178,11 +178,16 @@ def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction return pb.OrchestratorAction(id=id, createTimer=pb.CreateTimerAction(fireAt=timestamp)) -def new_schedule_task_action(id: int, name: str, encoded_input: Optional[str]) -> pb.OrchestratorAction: - return pb.OrchestratorAction(id=id, scheduleTask=pb.ScheduleTaskAction( - name=name, - input=get_string_value(encoded_input) - )) +def new_schedule_task_action(id: int, name: str, encoded_input: Optional[str], router: Optional[pb.TaskRouter] = None) -> pb.OrchestratorAction: + return pb.OrchestratorAction( + id=id, + scheduleTask=pb.ScheduleTaskAction( + name=name, + input=get_string_value(encoded_input), + router=router, + ), + router=router, + ) def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp: @@ -195,12 +200,18 @@ def new_create_sub_orchestration_action( id: int, name: str, instance_id: Optional[str], - encoded_input: Optional[str]) -> pb.OrchestratorAction: - return pb.OrchestratorAction(id=id, createSubOrchestration=pb.CreateSubOrchestrationAction( - name=name, - instanceId=instance_id, - input=get_string_value(encoded_input) - )) + encoded_input: Optional[str], + router: Optional[pb.TaskRouter] = None) -> pb.OrchestratorAction: + return pb.OrchestratorAction( + id=id, + createSubOrchestration=pb.CreateSubOrchestrationAction( + name=name, + instanceId=instance_id, + input=get_string_value(encoded_input), + router=router, + ), + router=router, + ) def is_empty(v: wrappers_pb2.StringValue): diff --git a/durabletask/task.py 
b/durabletask/task.py index d319bf2..29af2c5 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -100,7 +100,8 @@ def create_timer(self, fire_at: Union[datetime, timedelta]) -> Task: @abstractmethod def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *, input: Optional[TInput] = None, - retry_policy: Optional[RetryPolicy] = None) -> Task[TOutput]: + retry_policy: Optional[RetryPolicy] = None, + app_id: Optional[str] = None) -> Task[TOutput]: """Schedule an activity for execution. Parameters @@ -111,6 +112,8 @@ def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *, The JSON-serializable input (or None) to pass to the activity. retry_policy: Optional[RetryPolicy] The retry policy to use for this activity call. + app_id: Optional[str] + The app ID that will execute the activity. If not specified, the activity will be executed by the same app as the orchestrator. Returns ------- @@ -123,7 +126,8 @@ def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *, def call_sub_orchestrator(self, orchestrator: Orchestrator[TInput, TOutput], *, input: Optional[TInput] = None, instance_id: Optional[str] = None, - retry_policy: Optional[RetryPolicy] = None) -> Task[TOutput]: + retry_policy: Optional[RetryPolicy] = None, + app_id: Optional[str] = None) -> Task[TOutput]: """Schedule sub-orchestrator function for execution. Parameters @@ -137,6 +141,8 @@ def call_sub_orchestrator(self, orchestrator: Orchestrator[TInput, TOutput], *, random UUID will be used. retry_policy: Optional[RetryPolicy] The retry policy to use for this sub-orchestrator call. + app_id: Optional[str] + The app ID that will execute the sub-orchestrator. If not specified, the sub-orchestrator will be executed by the same app as the orchestrator. 
Returns ------- diff --git a/durabletask/worker.py b/durabletask/worker.py index 0922567..7a04649 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -584,6 +584,7 @@ def __init__(self, instance_id: str): self._sequence_number = 0 self._current_utc_datetime = datetime(1000, 1, 1) self._instance_id = instance_id + self._app_id = None self._completion_status: Optional[pb.OrchestrationStatus] = None self._received_events: dict[str, list[Any]] = {} self._pending_events: dict[str, list[task.CompletableTask]] = {} @@ -705,6 +706,10 @@ def next_sequence_number(self) -> int: self._sequence_number += 1 return self._sequence_number + @property + def app_id(self) -> str: + return self._app_id + @property def instance_id(self) -> str: return self._instance_id @@ -752,24 +757,29 @@ def call_activity( *, input: Optional[TInput] = None, retry_policy: Optional[task.RetryPolicy] = None, + app_id: Optional[str] = None, ) -> task.Task[TOutput]: id = self.next_sequence_number() self.call_activity_function_helper( - id, activity, input=input, retry_policy=retry_policy, is_sub_orch=False + id, activity, input=input, retry_policy=retry_policy, is_sub_orch=False, app_id=app_id ) return self._pending_tasks.get(id, task.CompletableTask()) def call_sub_orchestrator( self, - orchestrator: task.Orchestrator[TInput, TOutput], + orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, input: Optional[TInput] = None, instance_id: Optional[str] = None, retry_policy: Optional[task.RetryPolicy] = None, + app_id: Optional[str] = None, ) -> task.Task[TOutput]: id = self.next_sequence_number() - orchestrator_name = task.get_name(orchestrator) + if isinstance(orchestrator, str): + orchestrator_name = orchestrator + else: + orchestrator_name = task.get_name(orchestrator) self.call_activity_function_helper( id, orchestrator_name, @@ -777,6 +787,7 @@ def call_sub_orchestrator( retry_policy=retry_policy, is_sub_orch=True, instance_id=instance_id, + app_id=app_id, ) return 
self._pending_tasks.get(id, task.CompletableTask()) @@ -790,10 +801,16 @@ def call_activity_function_helper( is_sub_orch: bool = False, instance_id: Optional[str] = None, fn_task: Optional[task.CompletableTask[TOutput]] = None, + app_id: Optional[str] = None, ): if id is None: id = self.next_sequence_number() + router = pb.TaskRouter() + router.sourceAppID = self._app_id + if app_id is not None: + router.targetAppID = app_id + if fn_task is None: encoded_input = shared.to_json(input) if input is not None else None else: @@ -806,7 +823,7 @@ def call_activity_function_helper( if isinstance(activity_function, str) else task.get_name(activity_function) ) - action = ph.new_schedule_task_action(id, name, encoded_input) + action = ph.new_schedule_task_action(id, name, encoded_input, router) else: if instance_id is None: # Create a deteministic instance ID based on the parent instance ID @@ -814,7 +831,7 @@ def call_activity_function_helper( if not isinstance(activity_function, str): raise ValueError("Orchestrator function name must be a string") action = ph.new_create_sub_orchestration_action( - id, activity_function, instance_id, encoded_input + id, activity_function, instance_id, encoded_input, router ) self._pending_actions[id] = action @@ -953,6 +970,11 @@ def process_event( if event.HasField("orchestratorStarted"): ctx.current_utc_datetime = event.timestamp.ToDatetime() elif event.HasField("executionStarted"): + if event.router.targetAppID: + ctx._app_id = event.router.targetAppID + else: + ctx._app_id = event.router.sourceAppID + # TODO: Check if we already started the orchestration fn = self._registry.get_orchestrator(event.executionStarted.name) if fn is None: @@ -1010,6 +1032,11 @@ def process_event( else: cur_task = activity_action.createSubOrchestration instance_id = cur_task.instanceId + if cur_task.router and cur_task.router.targetAppID: + target_app_id = cur_task.router.targetAppID + else: + target_app_id = None + ctx.call_activity_function_helper( 
id=activity_action.id, activity_function=cur_task.name, @@ -1018,6 +1045,7 @@ def process_event( is_sub_orch=timer_task._retryable_parent._is_sub_orch, instance_id=instance_id, fn_task=timer_task._retryable_parent, + app_id=target_app_id, ) else: ctx.resume() diff --git a/tests/durabletask/test_orchestration_executor.py b/tests/durabletask/test_orchestration_executor.py index cb77c81..21f6c6c 100644 --- a/tests/durabletask/test_orchestration_executor.py +++ b/tests/durabletask/test_orchestration_executor.py @@ -171,6 +171,70 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): assert actions[0].scheduleTask.input.value == encoded_input +def test_schedule_activity_actions_router_without_app_id(): + """Tests that scheduleTask action contains correct router fields when app_id is specified""" + def dummy_activity(ctx, _): + pass + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_activity(dummy_activity, input=42) + + registry = worker._Registry() + name = registry.add_orchestrator(orchestrator) + + # Prepare execution started event with source app set on router + exec_evt = helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None) + exec_evt.router.sourceAppID = "source-app" + + new_events = [ + helpers.new_orchestrator_started_event(), + exec_evt, + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, [], new_events) + actions = result.actions + + assert len(actions) == 1 + action = actions[0] + assert action.router.sourceAppID == "source-app" + assert action.router.targetAppID == '' + assert action.scheduleTask.router.sourceAppID == "source-app" + assert action.scheduleTask.router.targetAppID == '' + + +def test_schedule_activity_actions_router_with_app_id(): + """Tests that scheduleTask action contains correct router fields when app_id is specified""" + def dummy_activity(ctx, _): + pass + + def orchestrator(ctx: task.OrchestrationContext, 
_): + yield ctx.call_activity(dummy_activity, input=42, app_id="target-app") + + registry = worker._Registry() + name = registry.add_orchestrator(orchestrator) + + # Prepare execution started event with source app set on router + exec_evt = helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None) + exec_evt.router.sourceAppID = "source-app" + + new_events = [ + helpers.new_orchestrator_started_event(), + exec_evt, + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, [], new_events) + actions = result.actions + + assert len(actions) == 1 + action = actions[0] + assert action.router.sourceAppID == "source-app" + assert action.router.targetAppID == "target-app" + assert action.scheduleTask.router.sourceAppID == "source-app" + assert action.scheduleTask.router.targetAppID == "target-app" + + def test_activity_task_completion(): """Tests the successful completion of an activity task""" @@ -561,6 +625,70 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert complete_action.result.value == "42" +def test_create_sub_orchestration_actions_router_without_app_id(): + """Tests that createSubOrchestration action contains correct router fields when app_id is specified""" + def suborchestrator(ctx: task.OrchestrationContext, _): + pass + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_sub_orchestrator(suborchestrator, input=None) + + registry = worker._Registry() + suborchestrator_name = registry.add_orchestrator(suborchestrator) + orchestrator_name = registry.add_orchestrator(orchestrator) + + exec_evt = helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None) + exec_evt.router.sourceAppID = "source-app" + + new_events = [ + helpers.new_orchestrator_started_event(), + exec_evt, + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, [], new_events) + actions = 
result.actions + + assert len(actions) == 1 + action = actions[0] + assert action.router.sourceAppID == "source-app" + assert action.router.targetAppID == '' + assert action.createSubOrchestration.router.sourceAppID == "source-app" + assert action.createSubOrchestration.router.targetAppID == '' + + +def test_create_sub_orchestration_actions_router_with_app_id(): + """Tests that createSubOrchestration action contains correct router fields when app_id is specified""" + def suborchestrator(ctx: task.OrchestrationContext, _): + pass + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_sub_orchestrator(suborchestrator, input=None, app_id="target-app") + + registry = worker._Registry() + suborchestrator_name = registry.add_orchestrator(suborchestrator) + orchestrator_name = registry.add_orchestrator(orchestrator) + + exec_evt = helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None) + exec_evt.router.sourceAppID = "source-app" + + new_events = [ + helpers.new_orchestrator_started_event(), + exec_evt, + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, [], new_events) + actions = result.actions + + assert len(actions) == 1 + action = actions[0] + assert action.router.sourceAppID == "source-app" + assert action.router.targetAppID == "target-app" + assert action.createSubOrchestration.router.sourceAppID == "source-app" + assert action.createSubOrchestration.router.targetAppID == "target-app" + + def test_sub_orchestration_task_failed(): """Tests that a sub-orchestration task is completed when the sub-orchestration fails""" def suborchestrator(ctx: task.OrchestrationContext, _): From 6a30ffd199f639320c37dc22e8aa6258cfd2f06e Mon Sep 17 00:00:00 2001 From: "nelson.parente" Date: Mon, 22 Sep 2025 18:48:38 +0100 Subject: [PATCH 35/81] chore: add codeowners Signed-off-by: nelson.parente --- CODEOWNERS | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 
CODEOWNERS diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..0ca6992 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,2 @@ +# These owners are the maintainers and approvers of this repo +* @dapr/maintainers-python-sdk @dapr/approvers-python-sdk \ No newline at end of file From f4d8a38e0db80271ff7722ff0fd7fab1faafb3ef Mon Sep 17 00:00:00 2001 From: Patrick Assuied Date: Sat, 27 Sep 2025 12:31:32 -0700 Subject: [PATCH 36/81] - Introduced `AsyncTaskHubGrpcClient` as async implementation of `TaskHubGrpcClient` - Added e2e tests Signed-off-by: Patrick Assuied --- durabletask/aio/__init__.py | 5 + durabletask/aio/client.py | 160 ++++++ durabletask/aio/internal/__init__.py | 0 durabletask/aio/internal/grpc_interceptor.py | 55 ++ durabletask/aio/internal/shared.py | 46 ++ durabletask/task.py | 1 + durabletask/worker.py | 2 +- tests/durabletask/test_client.py | 1 + tests/durabletask/test_client_async.py | 103 ++++ tests/durabletask/test_orchestration_e2e.py | 1 - .../test_orchestration_e2e_async.py | 487 ++++++++++++++++++ .../test_orchestration_executor.py | 4 +- tests/durabletask/test_orchestration_wait.py | 7 +- 13 files changed, 864 insertions(+), 8 deletions(-) create mode 100644 durabletask/aio/__init__.py create mode 100644 durabletask/aio/client.py create mode 100644 durabletask/aio/internal/__init__.py create mode 100644 durabletask/aio/internal/grpc_interceptor.py create mode 100644 durabletask/aio/internal/shared.py create mode 100644 tests/durabletask/test_client_async.py create mode 100644 tests/durabletask/test_orchestration_e2e_async.py diff --git a/durabletask/aio/__init__.py b/durabletask/aio/__init__.py new file mode 100644 index 0000000..d446228 --- /dev/null +++ b/durabletask/aio/__init__.py @@ -0,0 +1,5 @@ +from .client import AsyncTaskHubGrpcClient + +__all__ = [ + "AsyncTaskHubGrpcClient", +] diff --git a/durabletask/aio/client.py b/durabletask/aio/client.py new file mode 100644 index 0000000..51797f3 --- /dev/null +++ 
b/durabletask/aio/client.py @@ -0,0 +1,160 @@ +import logging +import uuid +from datetime import datetime +from typing import Any, Optional, Sequence, Union + +import grpc +from google.protobuf import wrappers_pb2 + +import durabletask.internal.helpers as helpers +import durabletask.internal.orchestrator_service_pb2 as pb +import durabletask.internal.orchestrator_service_pb2_grpc as stubs +import durabletask.internal.shared as shared +from durabletask.aio.internal.shared import get_grpc_aio_channel, AioClientInterceptor +from durabletask import task +from durabletask.client import OrchestrationState, OrchestrationStatus, new_orchestration_state, TInput, TOutput +from durabletask.aio.internal.grpc_interceptor import DefaultAioClientInterceptorImpl + + +class AsyncTaskHubGrpcClient: + + def __init__(self, *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[AioClientInterceptor]] = None): + + if interceptors is not None: + interceptors = list(interceptors) + if metadata is not None: + interceptors.append(DefaultAioClientInterceptorImpl(metadata)) + elif metadata is not None: + interceptors = [DefaultAioClientInterceptorImpl(metadata)] + else: + interceptors = None + + channel = get_grpc_aio_channel( + host_address=host_address, + secure_channel=secure_channel, + interceptors=interceptors + ) + self._channel = channel + self._stub = stubs.TaskHubSidecarServiceStub(channel) + self._logger = shared.get_logger("client", log_handler, log_formatter) + + async def aclose(self): + await self._channel.close() + + async def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + start_at: Optional[datetime] = None, + reuse_id_policy: 
Optional[pb.OrchestrationIdReusePolicy] = None) -> str: + + name = orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) + + req = pb.CreateInstanceRequest( + name=name, + instanceId=instance_id if instance_id else uuid.uuid4().hex, + input=wrappers_pb2.StringValue(value=shared.to_json(input)) if input is not None else None, + scheduledStartTimestamp=helpers.new_timestamp(start_at) if start_at else None, + version=helpers.get_string_value(None), + orchestrationIdReusePolicy=reuse_id_policy, + ) + + self._logger.info(f"Starting new '{name}' instance with ID = '{req.instanceId}'.") + res: pb.CreateInstanceResponse = await self._stub.StartInstance(req) + return res.instanceId + + async def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> Optional[OrchestrationState]: + req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) + res: pb.GetInstanceResponse = await self._stub.GetInstance(req) + return new_orchestration_state(req.instanceId, res) + + async def wait_for_orchestration_start(self, instance_id: str, *, + fetch_payloads: bool = False, + timeout: int = 0) -> Optional[OrchestrationState]: + req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) + try: + grpc_timeout = None if timeout == 0 else timeout + self._logger.info( + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to start.") + res: pb.GetInstanceResponse = await self._stub.WaitForInstanceStart(req, timeout=grpc_timeout) + return new_orchestration_state(req.instanceId, res) + except grpc.RpcError as rpc_error: + if rpc_error.code() == grpc.StatusCode.DEADLINE_EXCEEDED: # type: ignore + # Replace gRPC error with the built-in TimeoutError + raise TimeoutError("Timed-out waiting for the orchestration to start") + else: + raise + + async def wait_for_orchestration_completion(self, instance_id: str, *, + fetch_payloads: bool = True, + 
timeout: int = 0) -> Optional[OrchestrationState]: + req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) + try: + grpc_timeout = None if timeout == 0 else timeout + self._logger.info( + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to complete.") + res: pb.GetInstanceResponse = await self._stub.WaitForInstanceCompletion(req, timeout=grpc_timeout) + state = new_orchestration_state(req.instanceId, res) + if not state: + return None + + if state.runtime_status == OrchestrationStatus.FAILED and state.failure_details is not None: + details = state.failure_details + self._logger.info(f"Instance '{instance_id}' failed: [{details.error_type}] {details.message}") + elif state.runtime_status == OrchestrationStatus.TERMINATED: + self._logger.info(f"Instance '{instance_id}' was terminated.") + elif state.runtime_status == OrchestrationStatus.COMPLETED: + self._logger.info(f"Instance '{instance_id}' completed.") + + return state + except grpc.RpcError as rpc_error: + if rpc_error.code() == grpc.StatusCode.DEADLINE_EXCEEDED: # type: ignore + # Replace gRPC error with the built-in TimeoutError + raise TimeoutError("Timed-out waiting for the orchestration to complete") + else: + raise + + async def raise_orchestration_event( + self, + instance_id: str, + event_name: str, + *, + data: Optional[Any] = None): + req = pb.RaiseEventRequest( + instanceId=instance_id, + name=event_name, + input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data else None) + + self._logger.info(f"Raising event '{event_name}' for instance '{instance_id}'.") + await self._stub.RaiseEvent(req) + + async def terminate_orchestration(self, instance_id: str, *, + output: Optional[Any] = None, + recursive: bool = True): + req = pb.TerminateRequest( + instanceId=instance_id, + output=wrappers_pb2.StringValue(value=shared.to_json(output)) if output else None, + recursive=recursive) + + self._logger.info(f"Terminating 
instance '{instance_id}'.") + await self._stub.TerminateInstance(req) + + async def suspend_orchestration(self, instance_id: str): + req = pb.SuspendRequest(instanceId=instance_id) + self._logger.info(f"Suspending instance '{instance_id}'.") + await self._stub.SuspendInstance(req) + + async def resume_orchestration(self, instance_id: str): + req = pb.ResumeRequest(instanceId=instance_id) + self._logger.info(f"Resuming instance '{instance_id}'.") + await self._stub.ResumeInstance(req) + + async def purge_orchestration(self, instance_id: str, recursive: bool = True): + req = pb.PurgeInstancesRequest(instanceId=instance_id, recursive=recursive) + self._logger.info(f"Purging instance '{instance_id}'.") + await self._stub.PurgeInstances(req) diff --git a/durabletask/aio/internal/__init__.py b/durabletask/aio/internal/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/durabletask/aio/internal/grpc_interceptor.py b/durabletask/aio/internal/grpc_interceptor.py new file mode 100644 index 0000000..d2c1eb0 --- /dev/null +++ b/durabletask/aio/internal/grpc_interceptor.py @@ -0,0 +1,55 @@ +from collections import namedtuple + +from grpc import aio as grpc_aio + + +class _AioClientCallDetails( + namedtuple( + '_AioClientCallDetails', + ['method', 'timeout', 'metadata', 'credentials', 'wait_for_ready', 'compression']), + grpc_aio.ClientCallDetails): + pass + + +class DefaultAioClientInterceptorImpl( + grpc_aio.UnaryUnaryClientInterceptor, grpc_aio.UnaryStreamClientInterceptor, + grpc_aio.StreamUnaryClientInterceptor, grpc_aio.StreamStreamClientInterceptor): + """Async gRPC client interceptor to add metadata to all calls.""" + + def __init__(self, metadata: list[tuple[str, str]]): + super().__init__() + self._metadata = metadata + + def _intercept_call(self, client_call_details: _AioClientCallDetails) -> grpc_aio.ClientCallDetails: + if self._metadata is None: + return client_call_details + + if client_call_details.metadata is not None: + metadata = 
list(client_call_details.metadata) + else: + metadata = [] + + metadata.extend(self._metadata) + return _AioClientCallDetails( + client_call_details.method, + client_call_details.timeout, + metadata, + client_call_details.credentials, + client_call_details.wait_for_ready, + client_call_details.compression) + + async def intercept_unary_unary(self, continuation, client_call_details, request): + new_client_call_details = self._intercept_call(client_call_details) + return await continuation(new_client_call_details, request) + + async def intercept_unary_stream(self, continuation, client_call_details, request): + new_client_call_details = self._intercept_call(client_call_details) + return await continuation(new_client_call_details, request) + + async def intercept_stream_unary(self, continuation, client_call_details, request_iterator): + new_client_call_details = self._intercept_call(client_call_details) + return await continuation(new_client_call_details, request_iterator) + + async def intercept_stream_stream(self, continuation, client_call_details, request_iterator): + new_client_call_details = self._intercept_call(client_call_details) + return await continuation(new_client_call_details, request_iterator) diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py new file mode 100644 index 0000000..b15523d --- /dev/null +++ b/durabletask/aio/internal/shared.py @@ -0,0 +1,46 @@ +from typing import Optional, Sequence, Union + +import grpc +from grpc import aio as grpc_aio + +from durabletask.internal.shared import ( + get_default_host_address, + SECURE_PROTOCOLS, + INSECURE_PROTOCOLS, +) + + +AioClientInterceptor = Union[ + grpc_aio.UnaryUnaryClientInterceptor, + grpc_aio.UnaryStreamClientInterceptor, + grpc_aio.StreamUnaryClientInterceptor, + grpc_aio.StreamStreamClientInterceptor +] + + +def get_grpc_aio_channel( + host_address: Optional[str], + secure_channel: bool = False, + interceptors: Optional[Sequence[AioClientInterceptor]] = None) 
-> grpc_aio.Channel: + + if host_address is None: + host_address = get_default_host_address() + + for protocol in SECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = True + host_address = host_address[len(protocol):] + break + + for protocol in INSECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = False + host_address = host_address[len(protocol):] + break + + if secure_channel: + channel = grpc_aio.secure_channel(host_address, grpc.ssl_channel_credentials(), interceptors=interceptors) + else: + channel = grpc_aio.insecure_channel(host_address, interceptors=interceptors) + + return channel diff --git a/durabletask/task.py b/durabletask/task.py index 29af2c5..50970fd 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -283,6 +283,7 @@ def get_tasks(self) -> list[Task]: def on_child_completed(self, task: Task[T]): pass + class WhenAllTask(CompositeTask[list[T]]): """A task that completes when all of its child tasks complete.""" diff --git a/durabletask/worker.py b/durabletask/worker.py index 7a04649..e8e1fa9 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -880,13 +880,13 @@ class ExecutionResults: actions: list[pb.OrchestratorAction] encoded_custom_status: Optional[str] - def __init__( self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str] ): self.actions = actions self.encoded_custom_status = encoded_custom_status + class _OrchestrationExecutor: _generator: Optional[task.Orchestrator] = None diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py index e5a8e9b..e750134 100644 --- a/tests/durabletask/test_client.py +++ b/tests/durabletask/test_client.py @@ -21,6 +21,7 @@ def test_get_grpc_channel_secure(): get_grpc_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS, mock_credentials.return_value) + def test_get_grpc_channel_default_host_address(): with 
patch('grpc.insecure_channel') as mock_channel: get_grpc_channel(None, False, interceptors=INTERCEPTORS) diff --git a/tests/durabletask/test_client_async.py b/tests/durabletask/test_client_async.py new file mode 100644 index 0000000..691f39e --- /dev/null +++ b/tests/durabletask/test_client_async.py @@ -0,0 +1,103 @@ +from unittest.mock import ANY, patch + +from durabletask.aio.internal.grpc_interceptor import DefaultAioClientInterceptorImpl +from durabletask.internal.shared import get_default_host_address +from durabletask.aio.internal.shared import get_grpc_aio_channel +from durabletask.aio.client import AsyncTaskHubGrpcClient + + +HOST_ADDRESS = 'localhost:50051' +METADATA = [('key1', 'value1'), ('key2', 'value2')] +INTERCEPTORS_AIO = [DefaultAioClientInterceptorImpl(METADATA)] + + +def test_get_grpc_aio_channel_insecure(): + with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + get_grpc_aio_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS_AIO) + mock_channel.assert_called_once_with(HOST_ADDRESS, interceptors=INTERCEPTORS_AIO) + + +def test_get_grpc_aio_channel_secure(): + with patch('durabletask.aio.internal.shared.grpc_aio.secure_channel') as mock_channel, patch( + 'grpc.ssl_channel_credentials') as mock_credentials: + get_grpc_aio_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS_AIO) + mock_channel.assert_called_once_with(HOST_ADDRESS, mock_credentials.return_value, interceptors=INTERCEPTORS_AIO) + + +def test_get_grpc_aio_channel_default_host_address(): + with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + get_grpc_aio_channel(None, False, interceptors=INTERCEPTORS_AIO) + mock_channel.assert_called_once_with(get_default_host_address(), interceptors=INTERCEPTORS_AIO) + + +def test_get_grpc_aio_channel_with_interceptors(): + with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + get_grpc_aio_channel(HOST_ADDRESS, False, 
interceptors=INTERCEPTORS_AIO) + mock_channel.assert_called_once_with(HOST_ADDRESS, interceptors=INTERCEPTORS_AIO) + + # Capture and check the arguments passed to insecure_channel() + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert 'interceptors' in kwargs + interceptors = kwargs['interceptors'] + assert isinstance(interceptors[0], DefaultAioClientInterceptorImpl) + assert interceptors[0]._metadata == METADATA + + +def test_grpc_aio_channel_with_host_name_protocol_stripping(): + with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_insecure_channel, patch( + 'durabletask.aio.internal.shared.grpc_aio.secure_channel') as mock_secure_channel: + + host_name = "myserver.com:1234" + + prefix = "grpc://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + + prefix = "http://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + + prefix = "HTTP://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + + prefix = "GRPC://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + + prefix = "" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + + prefix = "grpcs://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + mock_secure_channel.assert_called_with(host_name, ANY, interceptors=INTERCEPTORS_AIO) + + prefix = "https://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + mock_secure_channel.assert_called_with(host_name, ANY, 
interceptors=INTERCEPTORS_AIO) + + prefix = "HTTPS://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + mock_secure_channel.assert_called_with(host_name, ANY, interceptors=INTERCEPTORS_AIO) + + prefix = "GRPCS://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + mock_secure_channel.assert_called_with(host_name, ANY, interceptors=INTERCEPTORS_AIO) + + prefix = "" + get_grpc_aio_channel(prefix + host_name, True, interceptors=INTERCEPTORS_AIO) + mock_secure_channel.assert_called_with(host_name, ANY, interceptors=INTERCEPTORS_AIO) + + +def test_async_client_construct_with_metadata(): + with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + AsyncTaskHubGrpcClient(host_address=HOST_ADDRESS, metadata=METADATA) + # Ensure channel created with an interceptor that has the expected metadata + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert 'interceptors' in kwargs + interceptors = kwargs['interceptors'] + assert isinstance(interceptors[0], DefaultAioClientInterceptorImpl) + assert interceptors[0]._metadata == METADATA diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index 2343184..76ec355 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -316,7 +316,6 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): output = "Recursive termination = {recurse}" task_hub_client.terminate_orchestration(instance_id, output=output, recursive=recurse) - metadata = task_hub_client.wait_for_orchestration_completion(instance_id, timeout=30) assert metadata is not None diff --git a/tests/durabletask/test_orchestration_e2e_async.py b/tests/durabletask/test_orchestration_e2e_async.py new file mode 100644 index 0000000..b35d33f --- /dev/null +++ b/tests/durabletask/test_orchestration_e2e_async.py @@ -0,0 +1,487 @@ +import asyncio +import json +import 
threading +from datetime import timedelta + +import pytest + +from durabletask.aio.client import AsyncTaskHubGrpcClient +from durabletask.client import OrchestrationStatus +from durabletask import task, worker + + +# NOTE: These tests assume a sidecar process is running. Example command: +# go install github.com/microsoft/durabletask-go@main +# durabletask-go --port 4001 +pytestmark = [pytest.mark.e2e, pytest.mark.anyio] + + +@pytest.fixture +def anyio_backend(): + return 'asyncio' + + +async def test_empty_orchestration(): + + invoked = False + + def empty_orchestrator(ctx: task.OrchestrationContext, _): + nonlocal invoked # don't do this in a real app! + invoked = True + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(empty_orchestrator) + w.start() + + c = AsyncTaskHubGrpcClient() + id = await c.schedule_new_orchestration(empty_orchestrator) + state = await c.wait_for_orchestration_completion(id, timeout=30) + await c.aclose() + + assert invoked + assert state is not None + assert state.name == task.get_name(empty_orchestrator) + assert state.instance_id == id + assert state.failure_details is None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_input is None + assert state.serialized_output is None + assert state.serialized_custom_status is None + + +async def test_activity_sequence(): + + def plus_one(_: task.ActivityContext, input: int) -> int: + return input + 1 + + def sequence(ctx: task.OrchestrationContext, start_val: int): + numbers = [start_val] + current = start_val + for _ in range(10): + current = yield ctx.call_activity(plus_one, input=current) + numbers.append(current) + return numbers + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(sequence) + w.add_activity(plus_one) + w.start() + + client = AsyncTaskHubGrpcClient() + id = 
await client.schedule_new_orchestration(sequence, input=1) + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.name == task.get_name(sequence) + assert state.instance_id == id + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert state.serialized_input == json.dumps(1) + assert state.serialized_output == json.dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + assert state.serialized_custom_status is None + + +async def test_activity_error_handling(): + + def throw(_: task.ActivityContext, input: int) -> int: + raise RuntimeError("Kah-BOOOOM!!!") + + compensation_counter = 0 + + def increment_counter(ctx, _): + nonlocal compensation_counter + compensation_counter += 1 + + def orchestrator(ctx: task.OrchestrationContext, input: int): + error_msg = "" + try: + yield ctx.call_activity(throw, input=input) + except task.TaskFailedError as e: + error_msg = e.details.message + + # compensating actions + yield ctx.call_activity(increment_counter) + yield ctx.call_activity(increment_counter) + + return error_msg + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(orchestrator) + w.add_activity(throw) + w.add_activity(increment_counter) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(orchestrator, input=1) + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.name == task.get_name(orchestrator) + assert state.instance_id == id + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps("Kah-BOOOOM!!!") + assert state.failure_details is None + assert state.serialized_custom_status is None + assert compensation_counter == 2 + + +async def test_sub_orchestration_fan_out(): 
+ threadLock = threading.Lock() + activity_counter = 0 + + def increment(ctx, _): + with threadLock: + nonlocal activity_counter + activity_counter += 1 + + def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int): + for _ in range(activity_count): + yield ctx.call_activity(increment) + + def parent_orchestrator(ctx: task.OrchestrationContext, count: int): + # Fan out to multiple sub-orchestrations + tasks = [] + for _ in range(count): + tasks.append(ctx.call_sub_orchestrator( + orchestrator_child, input=3)) + # Wait for all sub-orchestrations to complete + yield task.when_all(tasks) + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_activity(increment) + w.add_orchestrator(orchestrator_child) + w.add_orchestrator(parent_orchestrator) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(parent_orchestrator, input=10) + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert activity_counter == 30 + + +async def test_wait_for_multiple_external_events(): + def orchestrator(ctx: task.OrchestrationContext, _): + a = yield ctx.wait_for_external_event('A') + b = yield ctx.wait_for_external_event('B') + c = yield ctx.wait_for_external_event('C') + return [a, b, c] + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(orchestrator) + w.start() + + # Start the orchestration and immediately raise events to it. 
+ client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(orchestrator) + await client.raise_orchestration_event(id, 'A', data='a') + await client.raise_orchestration_event(id, 'B', data='b') + await client.raise_orchestration_event(id, 'C', data='c') + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(['a', 'b', 'c']) + + +@pytest.mark.parametrize("raise_event", [True, False]) +async def test_wait_for_external_event_timeout(raise_event: bool): + def orchestrator(ctx: task.OrchestrationContext, _): + approval: task.Task[bool] = ctx.wait_for_external_event('Approval') + timeout = ctx.create_timer(timedelta(seconds=3)) + winner = yield task.when_any([approval, timeout]) + if winner == approval: + return "approved" + else: + return "timed out" + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(orchestrator) + w.start() + + # Start the orchestration and immediately raise events to it. 
+ client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(orchestrator) + if raise_event: + await client.raise_orchestration_event(id, 'Approval') + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + if raise_event: + assert state.serialized_output == json.dumps("approved") + else: + assert state.serialized_output == json.dumps("timed out") + + +async def test_suspend_and_resume(): + def orchestrator(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(orchestrator) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(orchestrator) + state = await client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + + # Suspend the orchestration and wait for it to go into the SUSPENDED state + await client.suspend_orchestration(id) + while state.runtime_status == OrchestrationStatus.RUNNING: + await asyncio.sleep(0.1) + state = await client.get_orchestration_state(id) + assert state is not None + assert state.runtime_status == OrchestrationStatus.SUSPENDED + + # Raise an event to the orchestration and confirm that it does NOT complete + await client.raise_orchestration_event(id, "my_event", data=42) + try: + state = await client.wait_for_orchestration_completion(id, timeout=3) + assert False, "Orchestration should not have completed" + except TimeoutError: + pass + + # Resume the orchestration and wait for it to complete + await client.resume_orchestration(id) + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_output == 
json.dumps(42) + await client.aclose() + + +async def test_terminate(): + def orchestrator(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(orchestrator) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(orchestrator) + state = await client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.RUNNING + + await client.terminate_orchestration(id, output="some reason for termination") + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.TERMINATED + assert state.serialized_output == json.dumps("some reason for termination") + await client.aclose() + + +async def test_terminate_recursive(): + def root(ctx: task.OrchestrationContext, _): + result = yield ctx.call_sub_orchestrator(child) + return result + + def child(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(root) + w.add_orchestrator(child) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(root) + state = await client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.RUNNING + + # Terminate root orchestration(recursive set to True by default) + await client.terminate_orchestration(id, output="some reason for termination") + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.TERMINATED + + 
# Verify that child orchestration is also terminated + await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.TERMINATED + + await client.purge_orchestration(id) + state = await client.get_orchestration_state(id) + assert state is None + await client.aclose() + + +async def test_continue_as_new(): + all_results = [] + + def orchestrator(ctx: task.OrchestrationContext, input: int): + result = yield ctx.wait_for_external_event("my_event") + if not ctx.is_replaying: + # NOTE: Real orchestrations should never interact with nonlocal variables like this. + nonlocal all_results # noqa: F824 + all_results.append(result) + + if len(all_results) <= 4: + ctx.continue_as_new(max(all_results), save_events=True) + else: + return all_results + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(orchestrator) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(orchestrator, input=0) + await client.raise_orchestration_event(id, "my_event", data=1) + await client.raise_orchestration_event(id, "my_event", data=2) + await client.raise_orchestration_event(id, "my_event", data=3) + await client.raise_orchestration_event(id, "my_event", data=4) + await client.raise_orchestration_event(id, "my_event", data=5) + + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(all_results) + assert state.serialized_input == json.dumps(4) + assert all_results == [1, 2, 3, 4, 5] + await client.aclose() + + +async def test_retry_policies(): + # This test verifies that the retry policies are working as expected. 
+ # It does this by creating an orchestration that calls a sub-orchestrator, + # which in turn calls an activity that always fails. + # In this test, the retry policies are added, and the orchestration + # should still fail. But, number of times the sub-orchestrator and activity + # is called should increase as per the retry policies. + + child_orch_counter = 0 + throw_activity_counter = 0 + + # Second setup: With retry policies + retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + backoff_coefficient=1, + max_retry_interval=timedelta(seconds=10), + retry_timeout=timedelta(seconds=30)) + + def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _): + yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy) + + def child_orchestrator_with_retry(ctx: task.OrchestrationContext, _): + nonlocal child_orch_counter + if not ctx.is_replaying: + # NOTE: Real orchestrations should never interact with nonlocal variables like this. + # This is done only for testing purposes. 
+ child_orch_counter += 1 + yield ctx.call_activity(throw_activity_with_retry, retry_policy=retry_policy) + + def throw_activity_with_retry(ctx: task.ActivityContext, _): + nonlocal throw_activity_counter + throw_activity_counter += 1 + raise RuntimeError("Kah-BOOOOM!!!") + + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(parent_orchestrator_with_retry) + w.add_orchestrator(child_orchestrator_with_retry) + w.add_activity(throw_activity_with_retry) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(parent_orchestrator_with_retry) + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.FAILED + assert state.failure_details is not None + assert state.failure_details.error_type == "TaskFailedError" + assert state.failure_details.message.startswith("Sub-orchestration task #1 failed:") + assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") + assert state.failure_details.stack_trace is not None + assert throw_activity_counter == 9 + assert child_orch_counter == 3 + await client.aclose() + + +async def test_retry_timeout(): + # This test verifies that the retry timeout is working as expected. + # Max number of attempts is 5 and retry timeout is 14 seconds. + # Total seconds consumed till 4th attempt is 1 + 2 + 4 + 8 = 15 seconds. + # So, the 5th attempt should not be made and the orchestration should fail. 
+ throw_activity_counter = 0 + retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=5, + backoff_coefficient=2, + max_retry_interval=timedelta(seconds=10), + retry_timeout=timedelta(seconds=14)) + + def mock_orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_activity(throw_activity, retry_policy=retry_policy) + + def throw_activity(ctx: task.ActivityContext, _): + nonlocal throw_activity_counter + throw_activity_counter += 1 + raise RuntimeError("Kah-BOOOOM!!!") + + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(mock_orchestrator) + w.add_activity(throw_activity) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(mock_orchestrator) + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.FAILED + assert state.failure_details is not None + assert state.failure_details.error_type == "TaskFailedError" + assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") + assert state.failure_details.stack_trace is not None + assert throw_activity_counter == 4 + await client.aclose() + + +async def test_custom_status(): + + def empty_orchestrator(ctx: task.OrchestrationContext, _): + ctx.set_custom_status("foobaz") + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker() as w: + w.add_orchestrator(empty_orchestrator) + w.start() + + c = AsyncTaskHubGrpcClient() + id = await c.schedule_new_orchestration(empty_orchestrator) + state = await c.wait_for_orchestration_completion(id, timeout=30) + await c.aclose() + + assert state is not None + assert state.name == task.get_name(empty_orchestrator) + assert state.instance_id == id + assert state.failure_details is None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_input is None + assert 
state.serialized_output is None + assert state.serialized_custom_status == "\"foobaz\"" diff --git a/tests/durabletask/test_orchestration_executor.py b/tests/durabletask/test_orchestration_executor.py index 21f6c6c..c784135 100644 --- a/tests/durabletask/test_orchestration_executor.py +++ b/tests/durabletask/test_orchestration_executor.py @@ -634,7 +634,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): yield ctx.call_sub_orchestrator(suborchestrator, input=None) registry = worker._Registry() - suborchestrator_name = registry.add_orchestrator(suborchestrator) + registry.add_orchestrator(suborchestrator) orchestrator_name = registry.add_orchestrator(orchestrator) exec_evt = helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None) @@ -666,7 +666,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): yield ctx.call_sub_orchestrator(suborchestrator, input=None, app_id="target-app") registry = worker._Registry() - suborchestrator_name = registry.add_orchestrator(suborchestrator) + registry.add_orchestrator(suborchestrator) orchestrator_name = registry.add_orchestrator(orchestrator) exec_evt = helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None) diff --git a/tests/durabletask/test_orchestration_wait.py b/tests/durabletask/test_orchestration_wait.py index 03f7e30..c27345f 100644 --- a/tests/durabletask/test_orchestration_wait.py +++ b/tests/durabletask/test_orchestration_wait.py @@ -1,11 +1,9 @@ -from unittest.mock import patch, ANY, Mock +from unittest.mock import Mock from durabletask.client import TaskHubGrpcClient -from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl -from durabletask.internal.shared import (get_default_host_address, - get_grpc_channel) import pytest + @pytest.mark.parametrize("timeout", [None, 0, 5]) def test_wait_for_orchestration_start_timeout(timeout): instance_id = "test-instance" @@ -34,6 +32,7 @@ def 
test_wait_for_orchestration_start_timeout(timeout): else: assert kwargs.get('timeout') == timeout + @pytest.mark.parametrize("timeout", [None, 0, 5]) def test_wait_for_orchestration_completion_timeout(timeout): instance_id = "test-instance" From f3f1c4babc8c6959cc5b9b28fc21900f1ad4964b Mon Sep 17 00:00:00 2001 From: Patrick Assuied Date: Sun, 28 Sep 2025 08:54:44 -0700 Subject: [PATCH 37/81] Switch to pytest-asyncio fixed dev dependencies Signed-off-by: Patrick Assuied --- .github/workflows/pr-validation.yml | 2 +- README.md | 2 ++ dev-requirements.txt | 4 ++++ requirements.txt | 2 -- tests/durabletask/test_orchestration_e2e_async.py | 7 +------ 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 63540ac..33de31f 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -28,7 +28,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install flake8 pytest + pip install flake8 pytest pytest-cov pytest-asyncio pip install -r requirements.txt - name: Lint with flake8 run: | diff --git a/README.md b/README.md index 4a45d9b..3f691c0 100644 --- a/README.md +++ b/README.md @@ -173,6 +173,7 @@ This will download the `orchestrator_service.proto` from the `microsoft/durablet Unit tests can be run using the following command from the project root. Unit tests _don't_ require a sidecar process to be running. 
```sh +pip3 install -r dev-requirements.txt make test-unit ``` @@ -188,6 +189,7 @@ durabletask-go --port 4001 To run the E2E tests, run the following command from the project root: ```sh +pip3 install -r dev-requirements.txt make test-e2e ``` diff --git a/dev-requirements.txt b/dev-requirements.txt index 119f072..58f0b35 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1 +1,5 @@ grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 1.26.X is used which has breaking changes for Python +pytest +pytest-cov +pytest-asyncio +flake8 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 07426eb..41566b3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,4 @@ autopep8 grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible protobuf -pytest -pytest-cov asyncio diff --git a/tests/durabletask/test_orchestration_e2e_async.py b/tests/durabletask/test_orchestration_e2e_async.py index b35d33f..7717840 100644 --- a/tests/durabletask/test_orchestration_e2e_async.py +++ b/tests/durabletask/test_orchestration_e2e_async.py @@ -13,12 +13,7 @@ # NOTE: These tests assume a sidecar process is running. Example command: # go install github.com/microsoft/durabletask-go@main # durabletask-go --port 4001 -pytestmark = [pytest.mark.e2e, pytest.mark.anyio] - - -@pytest.fixture -def anyio_backend(): - return 'asyncio' +pytestmark = [pytest.mark.e2e, pytest.mark.asyncio] async def test_empty_orchestration(): From a21da3f5fecf404e6a000a50f754366bada6dd7a Mon Sep 17 00:00:00 2001 From: Patrick Assuied Date: Tue, 30 Sep 2025 11:10:26 -0700 Subject: [PATCH 38/81] Rename classes to avoid repeating `Aio` per PR feedback. 
Also cleaning dependencies to align protobuf dependencies between grpc-tools and grpcio Signed-off-by: Patrick Assuied --- dev-requirements.txt | 2 +- durabletask/aio/client.py | 10 +++++----- durabletask/aio/internal/grpc_interceptor.py | 10 +++++----- durabletask/aio/internal/shared.py | 4 ++-- requirements.txt | 2 +- tests/durabletask/test_client_async.py | 8 ++++---- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index 58f0b35..80d1ba7 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,4 +1,4 @@ -grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 1.26.X is used which has breaking changes for Python +grpcio-tools>=1.75.1 # supports protobuf 6.x and aligns with generated code pytest pytest-cov pytest-asyncio diff --git a/durabletask/aio/client.py b/durabletask/aio/client.py index 51797f3..ee5abd7 100644 --- a/durabletask/aio/client.py +++ b/durabletask/aio/client.py @@ -10,10 +10,10 @@ import durabletask.internal.orchestrator_service_pb2 as pb import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared -from durabletask.aio.internal.shared import get_grpc_aio_channel, AioClientInterceptor +from durabletask.aio.internal.shared import get_grpc_aio_channel, ClientInterceptor from durabletask import task from durabletask.client import OrchestrationState, OrchestrationStatus, new_orchestration_state, TInput, TOutput -from durabletask.aio.internal.grpc_interceptor import DefaultAioClientInterceptorImpl +from durabletask.aio.internal.grpc_interceptor import DefaultClientInterceptorImpl class AsyncTaskHubGrpcClient: @@ -24,14 +24,14 @@ def __init__(self, *, log_handler: Optional[logging.Handler] = None, log_formatter: Optional[logging.Formatter] = None, secure_channel: bool = False, - interceptors: Optional[Sequence[AioClientInterceptor]] = None): + interceptors: Optional[Sequence[ClientInterceptor]] = None): if interceptors is 
not None: interceptors = list(interceptors) if metadata is not None: - interceptors.append(DefaultAioClientInterceptorImpl(metadata)) + interceptors.append(DefaultClientInterceptorImpl(metadata)) elif metadata is not None: - interceptors = [DefaultAioClientInterceptorImpl(metadata)] + interceptors = [DefaultClientInterceptorImpl(metadata)] else: interceptors = None diff --git a/durabletask/aio/internal/grpc_interceptor.py b/durabletask/aio/internal/grpc_interceptor.py index d2c1eb0..06dae95 100644 --- a/durabletask/aio/internal/grpc_interceptor.py +++ b/durabletask/aio/internal/grpc_interceptor.py @@ -3,15 +3,15 @@ from grpc import aio as grpc_aio -class _AioClientCallDetails( +class _ClientCallDetails( namedtuple( - '_AioClientCallDetails', + '_ClientCallDetails', ['method', 'timeout', 'metadata', 'credentials', 'wait_for_ready', 'compression']), grpc_aio.ClientCallDetails): pass -class DefaultAioClientInterceptorImpl( +class DefaultClientInterceptorImpl( grpc_aio.UnaryUnaryClientInterceptor, grpc_aio.UnaryStreamClientInterceptor, grpc_aio.StreamUnaryClientInterceptor, grpc_aio.StreamStreamClientInterceptor): """Async gRPC client interceptor to add metadata to all calls.""" @@ -20,7 +20,7 @@ def __init__(self, metadata: list[tuple[str, str]]): super().__init__() self._metadata = metadata - def _intercept_call(self, client_call_details: _AioClientCallDetails) -> grpc_aio.ClientCallDetails: + def _intercept_call(self, client_call_details: _ClientCallDetails) -> grpc_aio.ClientCallDetails: if self._metadata is None: return client_call_details @@ -30,7 +30,7 @@ def _intercept_call(self, client_call_details: _AioClientCallDetails) -> grpc_ai metadata = [] metadata.extend(self._metadata) - return _AioClientCallDetails( + return _ClientCallDetails( client_call_details.method, client_call_details.timeout, metadata, diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py index b15523d..3e09ff3 100644 --- a/durabletask/aio/internal/shared.py +++ 
b/durabletask/aio/internal/shared.py @@ -10,7 +10,7 @@ ) -AioClientInterceptor = Union[ +ClientInterceptor = Union[ grpc_aio.UnaryUnaryClientInterceptor, grpc_aio.UnaryStreamClientInterceptor, grpc_aio.StreamUnaryClientInterceptor, @@ -21,7 +21,7 @@ def get_grpc_aio_channel( host_address: Optional[str], secure_channel: bool = False, - interceptors: Optional[Sequence[AioClientInterceptor]] = None) -> grpc_aio.Channel: + interceptors: Optional[Sequence[ClientInterceptor]] = None) -> grpc_aio.Channel: if host_address is None: host_address = get_default_host_address() diff --git a/requirements.txt b/requirements.txt index 41566b3..0f47c7f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ autopep8 grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible -protobuf +protobuf>=6,<7 asyncio diff --git a/tests/durabletask/test_client_async.py b/tests/durabletask/test_client_async.py index 691f39e..6e2b919 100644 --- a/tests/durabletask/test_client_async.py +++ b/tests/durabletask/test_client_async.py @@ -1,6 +1,6 @@ from unittest.mock import ANY, patch -from durabletask.aio.internal.grpc_interceptor import DefaultAioClientInterceptorImpl +from durabletask.aio.internal.grpc_interceptor import DefaultClientInterceptorImpl from durabletask.internal.shared import get_default_host_address from durabletask.aio.internal.shared import get_grpc_aio_channel from durabletask.aio.client import AsyncTaskHubGrpcClient @@ -8,7 +8,7 @@ HOST_ADDRESS = 'localhost:50051' METADATA = [('key1', 'value1'), ('key2', 'value2')] -INTERCEPTORS_AIO = [DefaultAioClientInterceptorImpl(METADATA)] +INTERCEPTORS_AIO = [DefaultClientInterceptorImpl(METADATA)] def test_get_grpc_aio_channel_insecure(): @@ -40,7 +40,7 @@ def test_get_grpc_aio_channel_with_interceptors(): assert args[0] == HOST_ADDRESS assert 'interceptors' in kwargs interceptors = kwargs['interceptors'] - assert isinstance(interceptors[0], 
DefaultAioClientInterceptorImpl) + assert isinstance(interceptors[0], DefaultClientInterceptorImpl) assert interceptors[0]._metadata == METADATA @@ -99,5 +99,5 @@ def test_async_client_construct_with_metadata(): assert args[0] == HOST_ADDRESS assert 'interceptors' in kwargs interceptors = kwargs['interceptors'] - assert isinstance(interceptors[0], DefaultAioClientInterceptorImpl) + assert isinstance(interceptors[0], DefaultClientInterceptorImpl) assert interceptors[0]._metadata == METADATA From eac4b8269d746c7015c032d637b58de76523201f Mon Sep 17 00:00:00 2001 From: Patrick Assuied Date: Tue, 30 Sep 2025 11:30:35 -0700 Subject: [PATCH 39/81] Fixed a bug where `when_all()` and `when_any()` are passed empty lists they return successfully. Added corresponding unit tests for happy path and edge case Signed-off-by: Patrick Assuied --- durabletask/task.py | 8 +++ .../test_orchestration_e2e_async.py | 2 + tests/durabletask/test_task.py | 67 +++++++++++++++++++ 3 files changed, 77 insertions(+) create mode 100644 tests/durabletask/test_task.py diff --git a/durabletask/task.py b/durabletask/task.py index 50970fd..5210c99 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -291,6 +291,10 @@ def __init__(self, tasks: list[Task[T]]): super().__init__(tasks) self._completed_tasks = 0 self._failed_tasks = 0 + # If there are no child tasks, this composite should complete immediately + if len(self._tasks) == 0: + self._result = [] # type: ignore[assignment] + self._is_complete = True @property def pending_tasks(self) -> int: @@ -388,6 +392,10 @@ class WhenAnyTask(CompositeTask[Task]): def __init__(self, tasks: list[Task]): super().__init__(tasks) + # If there are no child tasks, complete immediately with an empty result + if len(self._tasks) == 0: + self._result = [] # type: ignore[assignment] + self._is_complete = True def on_child_completed(self, task: Task): # The first task to complete is the result of the WhenAnyTask. 
diff --git a/tests/durabletask/test_orchestration_e2e_async.py b/tests/durabletask/test_orchestration_e2e_async.py index 7717840..eab2135 100644 --- a/tests/durabletask/test_orchestration_e2e_async.py +++ b/tests/durabletask/test_orchestration_e2e_async.py @@ -232,6 +232,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): with worker.TaskHubGrpcWorker() as w: w.add_orchestrator(orchestrator) w.start() + # there could be a race condition if the workflow is scheduled before orchestrator is started + await asyncio.sleep(0.2) client = AsyncTaskHubGrpcClient() id = await client.schedule_new_orchestration(orchestrator) diff --git a/tests/durabletask/test_task.py b/tests/durabletask/test_task.py new file mode 100644 index 0000000..914df5b --- /dev/null +++ b/tests/durabletask/test_task.py @@ -0,0 +1,67 @@ +"""Unit tests for durabletask.task primitives.""" + +from durabletask import task + + +def test_when_all_empty_returns_successfully(): + """task.when_all([]) should complete immediately and return an empty list.""" + when_all_task = task.when_all([]) + + assert when_all_task.is_complete + assert when_all_task.get_result() == [] + +def test_when_any_empty_returns_successfully(): + """task.when_any([]) should complete immediately and return an empty list.""" + when_any_task = task.when_any([]) + + assert when_any_task.is_complete + assert when_any_task.get_result() == [] + + +def test_when_all_happy_path_returns_ordered_results_and_completes_last(): + c1 = task.CompletableTask() + c2 = task.CompletableTask() + c3 = task.CompletableTask() + + all_task = task.when_all([c1, c2, c3]) + + assert not all_task.is_complete + + c2.complete("two") + + assert not all_task.is_complete + + c1.complete("one") + + assert not all_task.is_complete + + c3.complete("three") + + assert all_task.is_complete + + assert all_task.get_result() == ["one", "two", "three"] + + +def test_when_any_happy_path_returns_winner_task_and_completes_on_first(): + a = task.CompletableTask() + b = 
task.CompletableTask() + + any_task = task.when_any([a, b]) + + assert not any_task.is_complete + + b.complete("B") + + assert any_task.is_complete + + winner = any_task.get_result() + + assert winner is b + + assert winner.get_result() == "B" + + # Completing the other child should not change the winner + a.complete("A") + + assert any_task.get_result() is b + From ecccbef3a0b287494fcb92afd5ca120bec45f0b7 Mon Sep 17 00:00:00 2001 From: Patrick Assuied Date: Wed, 1 Oct 2025 10:47:29 -0700 Subject: [PATCH 40/81] Enabled context manager in client. Added copyright headers on new files Signed-off-by: Patrick Assuied --- durabletask/aio/client.py | 10 + durabletask/aio/internal/grpc_interceptor.py | 3 + durabletask/aio/internal/shared.py | 3 + tests/durabletask/test_client_async.py | 3 + .../test_orchestration_e2e_async.py | 190 +++++++++--------- tests/durabletask/test_task.py | 5 +- 6 files changed, 116 insertions(+), 98 deletions(-) diff --git a/durabletask/aio/client.py b/durabletask/aio/client.py index ee5abd7..4ec9bbf 100644 --- a/durabletask/aio/client.py +++ b/durabletask/aio/client.py @@ -1,3 +1,6 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. + import logging import uuid from datetime import datetime @@ -47,6 +50,13 @@ def __init__(self, *, async def aclose(self): await self._channel.close() + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.aclose() + return False + async def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, input: Optional[TInput] = None, instance_id: Optional[str] = None, diff --git a/durabletask/aio/internal/grpc_interceptor.py b/durabletask/aio/internal/grpc_interceptor.py index 06dae95..bf1ac98 100644 --- a/durabletask/aio/internal/grpc_interceptor.py +++ b/durabletask/aio/internal/grpc_interceptor.py @@ -1,3 +1,6 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. 
+ from collections import namedtuple from grpc import aio as grpc_aio diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py index 3e09ff3..6bdb256 100644 --- a/durabletask/aio/internal/shared.py +++ b/durabletask/aio/internal/shared.py @@ -1,3 +1,6 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. + from typing import Optional, Sequence, Union import grpc diff --git a/tests/durabletask/test_client_async.py b/tests/durabletask/test_client_async.py index 6e2b919..8f2b83e 100644 --- a/tests/durabletask/test_client_async.py +++ b/tests/durabletask/test_client_async.py @@ -1,3 +1,6 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. + from unittest.mock import ANY, patch from durabletask.aio.internal.grpc_interceptor import DefaultClientInterceptorImpl diff --git a/tests/durabletask/test_orchestration_e2e_async.py b/tests/durabletask/test_orchestration_e2e_async.py index eab2135..de586f1 100644 --- a/tests/durabletask/test_orchestration_e2e_async.py +++ b/tests/durabletask/test_orchestration_e2e_async.py @@ -1,3 +1,6 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. 
+ import asyncio import json import threading @@ -235,34 +238,33 @@ def orchestrator(ctx: task.OrchestrationContext, _): # there could be a race condition if the workflow is scheduled before orchestrator is started await asyncio.sleep(0.2) - client = AsyncTaskHubGrpcClient() - id = await client.schedule_new_orchestration(orchestrator) - state = await client.wait_for_orchestration_start(id, timeout=30) - assert state is not None - - # Suspend the orchestration and wait for it to go into the SUSPENDED state - await client.suspend_orchestration(id) - while state.runtime_status == OrchestrationStatus.RUNNING: - await asyncio.sleep(0.1) - state = await client.get_orchestration_state(id) + async with AsyncTaskHubGrpcClient() as client: + id = await client.schedule_new_orchestration(orchestrator) + state = await client.wait_for_orchestration_start(id, timeout=30) assert state is not None - assert state.runtime_status == OrchestrationStatus.SUSPENDED - # Raise an event to the orchestration and confirm that it does NOT complete - await client.raise_orchestration_event(id, "my_event", data=42) - try: - state = await client.wait_for_orchestration_completion(id, timeout=3) - assert False, "Orchestration should not have completed" - except TimeoutError: - pass - - # Resume the orchestration and wait for it to complete - await client.resume_orchestration(id) - state = await client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == OrchestrationStatus.COMPLETED - assert state.serialized_output == json.dumps(42) - await client.aclose() + # Suspend the orchestration and wait for it to go into the SUSPENDED state + await client.suspend_orchestration(id) + while state.runtime_status == OrchestrationStatus.RUNNING: + await asyncio.sleep(0.1) + state = await client.get_orchestration_state(id) + assert state is not None + assert state.runtime_status == OrchestrationStatus.SUSPENDED + + # Raise an event to the orchestration and 
confirm that it does NOT complete + await client.raise_orchestration_event(id, "my_event", data=42) + try: + state = await client.wait_for_orchestration_completion(id, timeout=3) + assert False, "Orchestration should not have completed" + except TimeoutError: + pass + + # Resume the orchestration and wait for it to complete + await client.resume_orchestration(id) + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(42) async def test_terminate(): @@ -275,18 +277,17 @@ def orchestrator(ctx: task.OrchestrationContext, _): w.add_orchestrator(orchestrator) w.start() - client = AsyncTaskHubGrpcClient() - id = await client.schedule_new_orchestration(orchestrator) - state = await client.wait_for_orchestration_start(id, timeout=30) - assert state is not None - assert state.runtime_status == OrchestrationStatus.RUNNING + async with AsyncTaskHubGrpcClient() as client: + id = await client.schedule_new_orchestration(orchestrator) + state = await client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.RUNNING - await client.terminate_orchestration(id, output="some reason for termination") - state = await client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == OrchestrationStatus.TERMINATED - assert state.serialized_output == json.dumps("some reason for termination") - await client.aclose() + await client.terminate_orchestration(id, output="some reason for termination") + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.TERMINATED + assert state.serialized_output == json.dumps("some reason for termination") async def test_terminate_recursive(): @@ -304,27 +305,26 @@ def child(ctx: 
task.OrchestrationContext, _): w.add_orchestrator(child) w.start() - client = AsyncTaskHubGrpcClient() - id = await client.schedule_new_orchestration(root) - state = await client.wait_for_orchestration_start(id, timeout=30) - assert state is not None - assert state.runtime_status == OrchestrationStatus.RUNNING + async with AsyncTaskHubGrpcClient() as client: + id = await client.schedule_new_orchestration(root) + state = await client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.RUNNING - # Terminate root orchestration(recursive set to True by default) - await client.terminate_orchestration(id, output="some reason for termination") - state = await client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == OrchestrationStatus.TERMINATED + # Terminate root orchestration(recursive set to True by default) + await client.terminate_orchestration(id, output="some reason for termination") + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.TERMINATED - # Verify that child orchestration is also terminated - await client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == OrchestrationStatus.TERMINATED + # Verify that child orchestration is also terminated + await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.TERMINATED - await client.purge_orchestration(id) - state = await client.get_orchestration_state(id) - assert state is None - await client.aclose() + await client.purge_orchestration(id) + state = await client.get_orchestration_state(id) + assert state is None async def test_continue_as_new(): @@ -347,21 +347,20 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): 
w.add_orchestrator(orchestrator) w.start() - client = AsyncTaskHubGrpcClient() - id = await client.schedule_new_orchestration(orchestrator, input=0) - await client.raise_orchestration_event(id, "my_event", data=1) - await client.raise_orchestration_event(id, "my_event", data=2) - await client.raise_orchestration_event(id, "my_event", data=3) - await client.raise_orchestration_event(id, "my_event", data=4) - await client.raise_orchestration_event(id, "my_event", data=5) + async with AsyncTaskHubGrpcClient() as client: + id = await client.schedule_new_orchestration(orchestrator, input=0) + await client.raise_orchestration_event(id, "my_event", data=1) + await client.raise_orchestration_event(id, "my_event", data=2) + await client.raise_orchestration_event(id, "my_event", data=3) + await client.raise_orchestration_event(id, "my_event", data=4) + await client.raise_orchestration_event(id, "my_event", data=5) - state = await client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == OrchestrationStatus.COMPLETED - assert state.serialized_output == json.dumps(all_results) - assert state.serialized_input == json.dumps(4) - assert all_results == [1, 2, 3, 4, 5] - await client.aclose() + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(all_results) + assert state.serialized_input == json.dumps(4) + assert all_results == [1, 2, 3, 4, 5] async def test_retry_policies(): @@ -405,19 +404,18 @@ def throw_activity_with_retry(ctx: task.ActivityContext, _): w.add_activity(throw_activity_with_retry) w.start() - client = AsyncTaskHubGrpcClient() - id = await client.schedule_new_orchestration(parent_orchestrator_with_retry) - state = await client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == 
OrchestrationStatus.FAILED - assert state.failure_details is not None - assert state.failure_details.error_type == "TaskFailedError" - assert state.failure_details.message.startswith("Sub-orchestration task #1 failed:") - assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") - assert state.failure_details.stack_trace is not None - assert throw_activity_counter == 9 - assert child_orch_counter == 3 - await client.aclose() + async with AsyncTaskHubGrpcClient() as client: + id = await client.schedule_new_orchestration(parent_orchestrator_with_retry) + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.FAILED + assert state.failure_details is not None + assert state.failure_details.error_type == "TaskFailedError" + assert state.failure_details.message.startswith("Sub-orchestration task #1 failed:") + assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") + assert state.failure_details.stack_trace is not None + assert throw_activity_counter == 9 + assert child_orch_counter == 3 async def test_retry_timeout(): @@ -446,17 +444,16 @@ def throw_activity(ctx: task.ActivityContext, _): w.add_activity(throw_activity) w.start() - client = AsyncTaskHubGrpcClient() - id = await client.schedule_new_orchestration(mock_orchestrator) - state = await client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == OrchestrationStatus.FAILED - assert state.failure_details is not None - assert state.failure_details.error_type == "TaskFailedError" - assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") - assert state.failure_details.stack_trace is not None - assert throw_activity_counter == 4 - await client.aclose() + async with AsyncTaskHubGrpcClient() as client: + id = await client.schedule_new_orchestration(mock_orchestrator) + state = 
await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.FAILED + assert state.failure_details is not None + assert state.failure_details.error_type == "TaskFailedError" + assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") + assert state.failure_details.stack_trace is not None + assert throw_activity_counter == 4 async def test_custom_status(): @@ -469,10 +466,9 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): w.add_orchestrator(empty_orchestrator) w.start() - c = AsyncTaskHubGrpcClient() - id = await c.schedule_new_orchestration(empty_orchestrator) - state = await c.wait_for_orchestration_completion(id, timeout=30) - await c.aclose() + async with AsyncTaskHubGrpcClient() as client: + id = await client.schedule_new_orchestration(empty_orchestrator) + state = await client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.name == task.get_name(empty_orchestrator) diff --git a/tests/durabletask/test_task.py b/tests/durabletask/test_task.py index 914df5b..81cc8a2 100644 --- a/tests/durabletask/test_task.py +++ b/tests/durabletask/test_task.py @@ -1,3 +1,6 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. 
+ """Unit tests for durabletask.task primitives.""" from durabletask import task @@ -10,6 +13,7 @@ def test_when_all_empty_returns_successfully(): assert when_all_task.is_complete assert when_all_task.get_result() == [] + def test_when_any_empty_returns_successfully(): """task.when_any([]) should complete immediately and return an empty list.""" when_any_task = task.when_any([]) @@ -64,4 +68,3 @@ def test_when_any_happy_path_returns_winner_task_and_completes_on_first(): a.complete("A") assert any_task.get_result() is b - From 3d8528d423af0bb2794d482a521d7831b24d1dd0 Mon Sep 17 00:00:00 2001 From: Patrick Assuied Date: Wed, 1 Oct 2025 12:36:22 -0700 Subject: [PATCH 41/81] reverting dependency updates and readme changes Signed-off-by: Patrick Assuied --- README.md | 2 -- dev-requirements.txt | 6 +----- requirements.txt | 6 +++++- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 3f691c0..4a45d9b 100644 --- a/README.md +++ b/README.md @@ -173,7 +173,6 @@ This will download the `orchestrator_service.proto` from the `microsoft/durablet Unit tests can be run using the following command from the project root. Unit tests _don't_ require a sidecar process to be running. 
```sh -pip3 install -r dev-requirements.txt make test-unit ``` @@ -189,7 +188,6 @@ durabletask-go --port 4001 To run the E2E tests, run the following command from the project root: ```sh -pip3 install -r dev-requirements.txt make test-e2e ``` diff --git a/dev-requirements.txt b/dev-requirements.txt index 80d1ba7..ba589ab 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,5 +1 @@ -grpcio-tools>=1.75.1 # supports protobuf 6.x and aligns with generated code -pytest -pytest-cov -pytest-asyncio -flake8 \ No newline at end of file +grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 1.26.X is used which has breaking changes for Python # supports protobuf 6.x and aligns with generated code \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 0f47c7f..06750e2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,8 @@ autopep8 grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible -protobuf>=6,<7 +protobuf asyncio +pytest +pytest-cov +pytest-asyncio +flake8 \ No newline at end of file From 9bba47962a86772f00ff544b026c345dc901ad24 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Fri, 24 Oct 2025 16:15:37 -0500 Subject: [PATCH 42/81] fix continue as new bug where we missed router on the multi-app Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/client.py | 6 +++- durabletask/internal/helpers.py | 21 +++++++---- durabletask/internal/shared.py | 24 ++++++++++++- durabletask/worker.py | 11 +++--- tests/durabletask/test_orchestration_e2e.py | 39 +++++++++++++++++++++ 5 files changed, 88 insertions(+), 13 deletions(-) diff --git a/durabletask/client.py b/durabletask/client.py index 7a72e1a..b155bd6 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -127,10 +127,14 @@ def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInpu name 
= orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) + input_pb = ( + wrappers_pb2.StringValue(value=shared.to_json(input)) if input is not None else None + ) + req = pb.CreateInstanceRequest( name=name, instanceId=instance_id if instance_id else uuid.uuid4().hex, - input=wrappers_pb2.StringValue(value=shared.to_json(input)) if input is not None else None, + input=input_pb, scheduledStartTimestamp=helpers.new_timestamp(start_at) if start_at else None, version=wrappers_pb2.StringValue(value=""), orchestrationIdReusePolicy=reuse_id_policy, diff --git a/durabletask/internal/helpers.py b/durabletask/internal/helpers.py index 48ab14b..682ab89 100644 --- a/durabletask/internal/helpers.py +++ b/durabletask/internal/helpers.py @@ -158,18 +158,25 @@ def get_string_value(val: Optional[str]) -> Optional[wrappers_pb2.StringValue]: def new_complete_orchestration_action( - id: int, - status: pb.OrchestrationStatus, - result: Optional[str] = None, - failure_details: Optional[pb.TaskFailureDetails] = None, - carryover_events: Optional[list[pb.HistoryEvent]] = None) -> pb.OrchestratorAction: + id: int, + status: pb.OrchestrationStatus, + result: Optional[str] = None, + failure_details: Optional[pb.TaskFailureDetails] = None, + carryover_events: Optional[list[pb.HistoryEvent]] = None, + router: Optional[pb.TaskRouter] = None, +) -> pb.OrchestratorAction: completeOrchestrationAction = pb.CompleteOrchestrationAction( orchestrationStatus=status, result=get_string_value(result), failureDetails=failure_details, - carryoverEvents=carryover_events) + carryoverEvents=carryover_events, + ) - return pb.OrchestratorAction(id=id, completeOrchestration=completeOrchestrationAction) + return pb.OrchestratorAction( + id=id, + completeOrchestration=completeOrchestrationAction, + router=router, + ) def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction: diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 
c0fbe74..22ac3df 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -13,7 +13,7 @@ grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor, grpc.StreamUnaryClientInterceptor, - grpc.StreamStreamClientInterceptor + grpc.StreamStreamClientInterceptor, ] # Field name used to indicate that an object was automatically serialized @@ -25,6 +25,28 @@ def get_default_host_address() -> str: + """Resolve the default Durable Task sidecar address. + + Honors environment variables if present; otherwise defaults to localhost:4001. + + Supported environment variables (checked in order): + - DURABLETASK_GRPC_ENDPOINT (e.g., "localhost:4001", "grpcs://host:443") + - DURABLETASK_GRPC_HOST and DURABLETASK_GRPC_PORT + """ + import os + + # Full endpoint overrides + endpoint = os.environ.get("DAPR_GRPC_ENDPOINT") + if endpoint: + return endpoint + + # Host/port split overrides + host = os.environ.get("DAPR_GRPC_HOST") or os.environ.get("DAPR_RUNTIME_HOST") + if host: + port = os.environ.get("DAPR_GRPC_PORT", "4001") + return f"{host}:{port}" + + # Default to durabletask-go default port return "localhost:4001" diff --git a/durabletask/worker.py b/durabletask/worker.py index e8e1fa9..695dc44 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -643,7 +643,10 @@ def set_complete( if result is not None: result_json = result if is_result_encoded else shared.to_json(result) action = ph.new_complete_orchestration_action( - self.next_sequence_number(), status, result_json + self.next_sequence_number(), + status, + result_json, + router=pb.TaskRouter(sourceAppID=self._app_id) if self._app_id else None, ) self._pending_actions[action.id] = action @@ -660,6 +663,7 @@ def set_failed(self, ex: Exception): pb.ORCHESTRATION_STATUS_FAILED, None, ph.new_failure_details(ex), + router=pb.TaskRouter(sourceAppID=self._app_id) if self._app_id else None, ) self._pending_actions[action.id] = action @@ -692,11 +696,10 @@ def get_actions(self) -> 
list[pb.OrchestratorAction]: action = ph.new_complete_orchestration_action( self.next_sequence_number(), pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW, - result=shared.to_json(self._new_input) - if self._new_input is not None - else None, + result=shared.to_json(self._new_input) if self._new_input is not None else None, failure_details=None, carryover_events=carryover_events, + router=pb.TaskRouter(sourceAppID=self._app_id) if self._app_id else None, ) return [action] else: diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index 76ec355..f5651ff 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -366,6 +366,45 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): assert all_results == [1, 2, 3, 4, 5] +def test_continue_as_new_with_activity_e2e(): + """E2E test for continue_as_new with activities (generator-based).""" + activity_results = [] + + def double_activity(ctx: task.ActivityContext, value: int) -> int: + """Activity that doubles the value.""" + result = value * 2 + activity_results.append(result) + return result + + def orchestrator(ctx: task.OrchestrationContext, counter: int): + # Call activity to process the counter + processed = yield ctx.call_activity(double_activity, input=counter) + + # Continue as new up to 3 times + if counter < 3: + ctx.continue_as_new(counter + 1, save_events=False) + else: + return {"counter": counter, "processed": processed, "all_results": activity_results} + + with worker.TaskHubGrpcWorker() as w: + w.add_activity(double_activity) + w.add_orchestrator(orchestrator) + w.start() + + task_hub_client = client.TaskHubGrpcClient() + id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) + + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + + output = 
json.loads(state.serialized_output) + # Should have called activity 3 times with input values 1, 2, 3 + assert activity_results == [2, 4, 6] + assert output["counter"] == 3 + assert output["processed"] == 6 + + # NOTE: This test fails when running against durabletask-go with sqlite because the sqlite backend does not yet # support orchestration ID reuse. This gap is being tracked here: # https://github.com/microsoft/durabletask-go/issues/42 From 6ac8d147a01ca59a816dd02aa3a51dbe3261265a Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Mon, 27 Oct 2025 23:09:02 -0500 Subject: [PATCH 43/81] add options to client argument and e2e test Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/aio/client.py | 23 +++-- durabletask/aio/internal/shared.py | 51 +++++++--- durabletask/client.py | 23 +++-- durabletask/internal/shared.py | 39 ++++++-- .../test_grpc_aio_channel_options.py | 94 +++++++++++++++++++ .../durabletask/test_grpc_channel_options.py | 81 ++++++++++++++++ tests/durabletask/test_orchestration_e2e.py | 7 +- 7 files changed, 275 insertions(+), 43 deletions(-) create mode 100644 tests/durabletask/test_grpc_aio_channel_options.py create mode 100644 tests/durabletask/test_grpc_channel_options.py diff --git a/durabletask/aio/client.py b/durabletask/aio/client.py index 4ec9bbf..a295c7f 100644 --- a/durabletask/aio/client.py +++ b/durabletask/aio/client.py @@ -20,15 +20,17 @@ class AsyncTaskHubGrpcClient: - - def __init__(self, *, - host_address: Optional[str] = None, - metadata: Optional[list[tuple[str, str]]] = None, - log_handler: Optional[logging.Handler] = None, - log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False, - interceptors: Optional[Sequence[ClientInterceptor]] = None): - + def __init__( + self, + *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + 
log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None, + channel_options: Optional[Sequence[tuple[str, Any]]] = None, + ): if interceptors is not None: interceptors = list(interceptors) if metadata is not None: @@ -41,7 +43,8 @@ def __init__(self, *, channel = get_grpc_aio_channel( host_address=host_address, secure_channel=secure_channel, - interceptors=interceptors + interceptors=interceptors, + options=channel_options, ) self._channel = channel self._stub = stubs.TaskHubSidecarServiceStub(channel) diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py index 6bdb256..87266a1 100644 --- a/durabletask/aio/internal/shared.py +++ b/durabletask/aio/internal/shared.py @@ -1,49 +1,72 @@ # Copyright (c) The Dapr Authors. # Licensed under the MIT License. -from typing import Optional, Sequence, Union +from typing import Any, Optional, Sequence, Union import grpc from grpc import aio as grpc_aio from durabletask.internal.shared import ( - get_default_host_address, - SECURE_PROTOCOLS, INSECURE_PROTOCOLS, + SECURE_PROTOCOLS, + get_default_host_address, ) - ClientInterceptor = Union[ grpc_aio.UnaryUnaryClientInterceptor, grpc_aio.UnaryStreamClientInterceptor, grpc_aio.StreamUnaryClientInterceptor, - grpc_aio.StreamStreamClientInterceptor + grpc_aio.StreamStreamClientInterceptor, ] def get_grpc_aio_channel( - host_address: Optional[str], - secure_channel: bool = False, - interceptors: Optional[Sequence[ClientInterceptor]] = None) -> grpc_aio.Channel: + host_address: Optional[str], + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None, + options: Optional[Sequence[tuple[str, Any]]] = None, +) -> grpc_aio.Channel: + """create a grpc asyncio channel + Args: + host_address: The host address of the gRPC server. If None, uses the default address. + secure_channel: Whether to use a secure channel (TLS/SSL). Defaults to False. 
+ interceptors: Optional sequence of client interceptors to apply to the channel. + options: Optional sequence of gRPC channel options as (key, value) tuples. Keys defined in https://grpc.github.io/grpc/core/group__grpc__arg__keys.html + """ if host_address is None: host_address = get_default_host_address() for protocol in SECURE_PROTOCOLS: if host_address.lower().startswith(protocol): secure_channel = True - host_address = host_address[len(protocol):] + host_address = host_address[len(protocol) :] break for protocol in INSECURE_PROTOCOLS: if host_address.lower().startswith(protocol): secure_channel = False - host_address = host_address[len(protocol):] + host_address = host_address[len(protocol) :] break + # Create the base channel if secure_channel: - channel = grpc_aio.secure_channel(host_address, grpc.ssl_channel_credentials(), interceptors=interceptors) - else: - channel = grpc_aio.insecure_channel(host_address, interceptors=interceptors) + if options is not None: + return grpc_aio.secure_channel( + host_address, + grpc.ssl_channel_credentials(), + interceptors=interceptors, + options=options, + ) + return grpc_aio.secure_channel( + host_address, grpc.ssl_channel_credentials(), interceptors=interceptors + ) - return channel + if options is not None: + # validate all options keys prefix starts with `grpc.` + if not all(key.startswith('grpc.') for key, _ in options): + raise ValueError( + f'All options keys must start with `grpc.`. 
Invalid options: {options}' + ) + return grpc_aio.insecure_channel(host_address, interceptors=interceptors, options=options) + return grpc_aio.insecure_channel(host_address, interceptors=interceptors) diff --git a/durabletask/client.py b/durabletask/client.py index b155bd6..0c884f4 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -91,15 +91,17 @@ def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Op class TaskHubGrpcClient: - - def __init__(self, *, - host_address: Optional[str] = None, - metadata: Optional[list[tuple[str, str]]] = None, - log_handler: Optional[logging.Handler] = None, - log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False, - interceptors: Optional[Sequence[shared.ClientInterceptor]] = None): - + def __init__( + self, + *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, + channel_options: Optional[Sequence[tuple[str, Any]]] = None, + ): # If the caller provided metadata, we need to create a new interceptor for it and # add it to the list of interceptors. 
if interceptors is not None: @@ -114,7 +116,8 @@ def __init__(self, *, channel = shared.get_grpc_channel( host_address=host_address, secure_channel=secure_channel, - interceptors=interceptors + interceptors=interceptors, + options=channel_options, ) self._stub = stubs.TaskHubSidecarServiceStub(channel) self._logger = shared.get_logger("client", log_handler, log_formatter) diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 22ac3df..d29ecc4 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -51,9 +51,19 @@ def get_default_host_address() -> str: def get_grpc_channel( - host_address: Optional[str], - secure_channel: bool = False, - interceptors: Optional[Sequence[ClientInterceptor]] = None) -> grpc.Channel: + host_address: Optional[str], + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None, + options: Optional[Sequence[tuple[str, Any]]] = None, +) -> grpc.Channel: + """create a grpc channel + + Args: + host_address: The host address of the gRPC server. If None, uses the default address. + secure_channel: Whether to use a secure channel (TLS/SSL). Defaults to False. + interceptors: Optional sequence of client interceptors to apply to the channel. + options: Optional sequence of gRPC channel options as (key, value) tuples. 
Keys defined in https://grpc.github.io/grpc/core/group__grpc__arg__keys.html + """ if host_address is None: host_address = get_default_host_address() @@ -61,21 +71,34 @@ def get_grpc_channel( if host_address.lower().startswith(protocol): secure_channel = True # remove the protocol from the host name - host_address = host_address[len(protocol):] + host_address = host_address[len(protocol) :] break for protocol in INSECURE_PROTOCOLS: if host_address.lower().startswith(protocol): secure_channel = False # remove the protocol from the host name - host_address = host_address[len(protocol):] + host_address = host_address[len(protocol) :] break # Create the base channel - if secure_channel: - channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) + if options is not None: + # validate all options keys prefix starts with `grpc.` + if not all(key.startswith('grpc.') for key, _ in options): + raise ValueError( + f'All options keys must start with `grpc.`. Invalid options: {options}' + ) + if secure_channel: + channel = grpc.secure_channel( + host_address, grpc.ssl_channel_credentials(), options=options + ) + else: + channel = grpc.insecure_channel(host_address, options=options) else: - channel = grpc.insecure_channel(host_address) + if secure_channel: + channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) + else: + channel = grpc.insecure_channel(host_address) # Apply interceptors ONLY if they exist if interceptors: diff --git a/tests/durabletask/test_grpc_aio_channel_options.py b/tests/durabletask/test_grpc_aio_channel_options.py new file mode 100644 index 0000000..54830c8 --- /dev/null +++ b/tests/durabletask/test_grpc_aio_channel_options.py @@ -0,0 +1,94 @@ +import json +from unittest.mock import patch + +import pytest + +from durabletask.aio.internal.shared import get_grpc_aio_channel + +HOST_ADDRESS = 'localhost:50051' + + +def _find_option(options, key): + for k, v in options: + if k == key: + return v + raise 
AssertionError(f'Option with key {key} not found in options: {options}') + + +def test_aio_channel_passes_base_options_and_max_lengths(): + base_options = [ + ('grpc.max_send_message_length', 4321), + ('grpc.max_receive_message_length', 8765), + ('grpc.primary_user_agent', 'durabletask-aio-tests'), + ] + with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + get_grpc_aio_channel(HOST_ADDRESS, False, options=base_options) + # Ensure called with options kwarg + assert mock_channel.call_count == 1 + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert 'options' in kwargs + opts = kwargs['options'] + # Check our base options made it through + assert ('grpc.max_send_message_length', 4321) in opts + assert ('grpc.max_receive_message_length', 8765) in opts + assert ('grpc.primary_user_agent', 'durabletask-aio-tests') in opts + + +def test_aio_channel_merges_env_keepalive_and_retry(monkeypatch: pytest.MonkeyPatch): + # retry grpc option + # service_config ref => https://github.com/grpc/grpc-proto/blob/master/grpc/service_config/service_config.proto#L44 + max_attempts = 4 + initial_backoff_ms = 250 + max_backoff_ms = 2000 + backoff_multiplier = 1.5 + codes = ['RESOURCE_EXHAUSTED'] + service_config = { + 'methodConfig': [ + { + 'name': [{'service': ''}], # match all services/methods + 'retryPolicy': { + 'maxAttempts': max_attempts, + 'initialBackoff': f'{initial_backoff_ms / 1000.0}s', + 'maxBackoff': f'{max_backoff_ms / 1000.0}s', + 'backoffMultiplier': backoff_multiplier, + 'retryableStatusCodes': codes, + }, + } + ] + } + + base_options = [('grpc.service_config', json.dumps(service_config))] + + with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + get_grpc_aio_channel(HOST_ADDRESS, False, options=base_options) + + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert 'options' in kwargs + opts = kwargs['options'] + + # Retry service config present 
and parses correctly + svc_cfg_str = _find_option(opts, 'grpc.service_config') + svc_cfg = json.loads(svc_cfg_str) + assert 'methodConfig' in svc_cfg and isinstance(svc_cfg['methodConfig'], list) + retry_policy = svc_cfg['methodConfig'][0]['retryPolicy'] + assert retry_policy['maxAttempts'] == 4 + assert retry_policy['initialBackoff'] == f'{250 / 1000.0}s' + assert retry_policy['maxBackoff'] == f'{2000 / 1000.0}s' + assert retry_policy['backoffMultiplier'] == 1.5 + # Codes are upper-cased list + assert 'RESOURCE_EXHAUSTED' in retry_policy['retryableStatusCodes'] + + +def test_aio_secure_channel_receives_options_when_secure_true(): + base_options = [('grpc.max_receive_message_length', 999999)] + with ( + patch('durabletask.aio.internal.shared.grpc_aio.secure_channel') as mock_channel, + patch('grpc.ssl_channel_credentials') as mock_credentials, + ): + get_grpc_aio_channel(HOST_ADDRESS, True, options=base_options) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert args[1] == mock_credentials.return_value + assert ('grpc.max_receive_message_length', 999999) in kwargs.get('options', []) diff --git a/tests/durabletask/test_grpc_channel_options.py b/tests/durabletask/test_grpc_channel_options.py new file mode 100644 index 0000000..b8ac533 --- /dev/null +++ b/tests/durabletask/test_grpc_channel_options.py @@ -0,0 +1,81 @@ +import json +from unittest.mock import ANY, patch + +import pytest + +from durabletask.internal.shared import get_grpc_channel + +HOST_ADDRESS = 'localhost:50051' + + +def _find_option(options, key): + for k, v in options: + if k == key: + return v + raise AssertionError(f'Option with key {key} not found in options: {options}') + + +def test_sync_channel_passes_base_options_and_max_lengths(): + base_options = [ + ('grpc.max_send_message_length', 1234), + ('grpc.max_receive_message_length', 5678), + ('grpc.primary_user_agent', 'durabletask-tests'), + ] + with patch('grpc.insecure_channel') as mock_channel: + 
get_grpc_channel(HOST_ADDRESS, False, options=base_options) + # Ensure called with options kwarg + assert mock_channel.call_count == 1 + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert 'options' in kwargs + opts = kwargs['options'] + # Check our base options made it through + assert ('grpc.max_send_message_length', 1234) in opts + assert ('grpc.max_receive_message_length', 5678) in opts + assert ('grpc.primary_user_agent', 'durabletask-tests') in opts + + +def test_sync_channel_merges_env_keepalive_and_retry(monkeypatch: pytest.MonkeyPatch): + # retry grpc option + # service_config ref => https://github.com/grpc/grpc-proto/blob/master/grpc/service_config/service_config.proto#L44 + max_attempts = 4 + initial_backoff_ms = 250 + max_backoff_ms = 2000 + backoff_multiplier = 1.5 + codes = ['ABORTED'] + service_config = { + 'methodConfig': [ + { + 'name': [{'service': ''}], # match all services/methods + 'retryPolicy': { + 'maxAttempts': max_attempts, + 'initialBackoff': f'{initial_backoff_ms / 1000.0}s', + 'maxBackoff': f'{max_backoff_ms / 1000.0}s', + 'backoffMultiplier': backoff_multiplier, + 'retryableStatusCodes': codes, + }, + } + ] + } + + base_options = [('grpc.service_config', json.dumps(service_config))] + + with patch('grpc.insecure_channel') as mock_channel: + get_grpc_channel(HOST_ADDRESS, False, options=base_options) + + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert 'options' in kwargs + opts = kwargs['options'] + + # Retry service config present and parses correctly + svc_cfg_str = _find_option(opts, 'grpc.service_config') + svc_cfg = json.loads(svc_cfg_str) + assert 'methodConfig' in svc_cfg and isinstance(svc_cfg['methodConfig'], list) + retry_policy = svc_cfg['methodConfig'][0]['retryPolicy'] + assert retry_policy['maxAttempts'] == 4 + assert retry_policy['initialBackoff'] == f'{250 / 1000.0}s' + assert retry_policy['maxBackoff'] == f'{2000 / 1000.0}s' + assert 
retry_policy['backoffMultiplier'] == 1.5 + # Codes are upper-cased list + assert 'ABORTED' in retry_policy['retryableStatusCodes'] diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index f5651ff..5825d37 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -28,7 +28,12 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): w.add_orchestrator(empty_orchestrator) w.start() - c = client.TaskHubGrpcClient() + # set a custom max send length option + c = client.TaskHubGrpcClient( + channel_options=[ + ('grpc.max_send_message_length', 1024 * 1024), # 1MB + ] + ) id = c.schedule_new_orchestration(empty_orchestrator) state = c.wait_for_orchestration_completion(id, timeout=30) From e33a8d5c97a9bc8f7734b18d8c66f583e38b99d0 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Mon, 27 Oct 2025 23:13:58 -0500 Subject: [PATCH 44/81] correct docstring info on env var names Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/internal/shared.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index d29ecc4..f613b3d 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -30,8 +30,8 @@ def get_default_host_address() -> str: Honors environment variables if present; otherwise defaults to localhost:4001. Supported environment variables (checked in order): - - DURABLETASK_GRPC_ENDPOINT (e.g., "localhost:4001", "grpcs://host:443") - - DURABLETASK_GRPC_HOST and DURABLETASK_GRPC_PORT + - DAPR_GRPC_ENDPOINT (e.g., "localhost:4001", "grpcs://host:443") + - DAPR_GRPC_HOST/DAPR_RUNTIME_HOST and DAPR_GRPC_PORT """ import os @@ -59,7 +59,7 @@ def get_grpc_channel( """create a grpc channel Args: - host_address: The host address of the gRPC server. If None, uses the default address. 
+ host_address: The host address of the gRPC server. If None, uses the default address (as defined in get_default_host_address above). secure_channel: Whether to use a secure channel (TLS/SSL). Defaults to False. interceptors: Optional sequence of client interceptors to apply to the channel. options: Optional sequence of gRPC channel options as (key, value) tuples. Keys defined in https://grpc.github.io/grpc/core/group__grpc__arg__keys.html From f3f24fd5ac5b68c5ad0f98de0886d2eeb6d1d566 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Tue, 28 Oct 2025 08:40:38 -0500 Subject: [PATCH 45/81] update protobuf version so it trickles down to python-sdk Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- Makefile | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3c1ed51..5c13901 100644 --- a/Makefile +++ b/Makefile @@ -13,6 +13,7 @@ install: gen-proto: curl -o durabletask/internal/orchestrator_service.proto https://raw.githubusercontent.com/dapr/durabletask-protobuf/refs/heads/main/protos/orchestrator_service.proto curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/dapr/durabletask-protobuf/commits?path=protos/orchestrator_service.proto&sha=main&per_page=1" | jq -r '.[0].sha' > durabletask/internal/PROTO_SOURCE_COMMIT_HASH + # NOTE: remember to check/update pyproject.toml protobuf version to follow https://github.com/grpc/grpc/blob/v{{VERSION GRPC IO TOOL BELLOW}}/tools/distrib/python/grpcio_tools/setup.py pip install grpcio-tools==1.74.0 python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. 
./durabletask/internal/orchestrator_service.proto rm durabletask/internal/*.proto diff --git a/pyproject.toml b/pyproject.toml index 8c4d1e4..9bcf1bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,7 +26,7 @@ license = {file = "LICENSE"} readme = "README.md" dependencies = [ "grpcio", - "protobuf", + "protobuf>=6.31.1,<7.0.0", # follows grpcio generation version https://github.com/grpc/grpc/blob/v1.74.0/tools/distrib/python/grpcio_tools/setup.py "asyncio" ] From abeb0c31554a002e2dd753fc56a3a2e0f4a67e4b Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Wed, 29 Oct 2025 08:04:05 -0500 Subject: [PATCH 46/81] move os import to top level Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/internal/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 22ac3df..d15141f 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -4,6 +4,7 @@ import dataclasses import json import logging +import os from types import SimpleNamespace from typing import Any, Optional, Sequence, Union @@ -33,7 +34,6 @@ def get_default_host_address() -> str: - DURABLETASK_GRPC_ENDPOINT (e.g., "localhost:4001", "grpcs://host:443") - DURABLETASK_GRPC_HOST and DURABLETASK_GRPC_PORT """ - import os # Full endpoint overrides endpoint = os.environ.get("DAPR_GRPC_ENDPOINT") From c25c2ee849325e229e1c43c5a3cad5d871b7139e Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Thu, 30 Oct 2025 22:43:11 +0100 Subject: [PATCH 47/81] Added some linting and tooling update Signed-off-by: Albert Callarisa --- .flake8 | 6 ----- .github/workflows/pr-validation.yml | 13 +++++---- Makefile | 2 +- README.md | 28 +++++++++++++------ pyproject.toml | 42 ++++++++++++++++++++++++++--- requirements.txt | 9 +------ tox.ini | 34 +++++++++++++++++++++++ 7 files changed, 100 insertions(+), 34 deletions(-) delete mode 100644 
.flake8 create mode 100644 tox.ini diff --git a/.flake8 b/.flake8 deleted file mode 100644 index ecc399c..0000000 --- a/.flake8 +++ /dev/null @@ -1,6 +0,0 @@ -[flake8] -ignore = E501,C901 -exclude = - .git - *_pb2* - __pycache__ \ No newline at end of file diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 33de31f..e412ef9 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -28,14 +28,13 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install flake8 pytest pytest-cov pytest-asyncio - pip install -r requirements.txt - - name: Lint with flake8 + pip install .[dev] + - name: Lint with ruff run: | - flake8 . --count --show-source --statistics --exit-zero + ruff check - name: Pytest unit tests run: | - pytest -m "not e2e" --verbose + tox -e py${{ matrix.python-version }} # Sidecar for running e2e tests requires Go SDK - name: Install Go SDK uses: actions/setup-go@v5 @@ -46,7 +45,7 @@ jobs: run: | go install github.com/dapr/durabletask-go@main durabletask-go --port 4001 & - pytest -m "e2e" --verbose + tox -e py${{ matrix.python-version }}-e2e publish: needs: build if: startswith(github.ref, 'refs/tags/v') @@ -70,4 +69,4 @@ jobs: TWINE_PASSWORD: ${{ secrets.PYPI_UPLOAD_PASS }} run: | python -m build - twine upload dist/* \ No newline at end of file + twine upload dist/* diff --git a/Makefile b/Makefile index 5c13901..69daa40 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ gen-proto: curl -o durabletask/internal/orchestrator_service.proto https://raw.githubusercontent.com/dapr/durabletask-protobuf/refs/heads/main/protos/orchestrator_service.proto curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/dapr/durabletask-protobuf/commits?path=protos/orchestrator_service.proto&sha=main&per_page=1" | jq -r '.[0].sha' > durabletask/internal/PROTO_SOURCE_COMMIT_HASH # NOTE: remember to check/update pyproject.toml protobuf version 
to follow https://github.com/grpc/grpc/blob/v{{VERSION GRPC IO TOOL BELLOW}}/tools/distrib/python/grpcio_tools/setup.py - pip install grpcio-tools==1.74.0 + pip install .[dev] python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. ./durabletask/internal/orchestrator_service.proto rm durabletask/internal/*.proto diff --git a/README.md b/README.md index 4a45d9b..f6a0284 100644 --- a/README.md +++ b/README.md @@ -162,7 +162,6 @@ The following is more information about how to develop this project. Note that d ### Generating protobufs ```sh -pip3 install -r dev-requirements.txt make gen-proto ``` @@ -170,25 +169,38 @@ This will download the `orchestrator_service.proto` from the `microsoft/durablet ### Running unit tests -Unit tests can be run using the following command from the project root. Unit tests _don't_ require a sidecar process to be running. +Unit tests can be run using the following command from the project root. +Unit tests _don't_ require a sidecar process to be running. + +To run on a specific python version (eg: 3.11), run the following command from the project root: ```sh -make test-unit +tox -e py311 ``` ### Running E2E tests -The E2E (end-to-end) tests require a sidecar process to be running. You can use the Dapr sidecar for this or run a Durable Task test sidecar using the following command: +The E2E (end-to-end) tests require a sidecar process to be running. + +For non-multi app activities test you can use the Durable Task test sidecar using the following command: ```sh go install github.com/dapr/durabletask-go@main durabletask-go --port 4001 ``` -To run the E2E tests, run the following command from the project root: +Certain aspects like multi-app activities require the full dapr runtime to be running. 
+ +```shell +dapr init || true + +dapr run --app-id test-app --dapr-grpc-port 4001 --components-path ./examples/components/ +``` + +To run the E2E tests on a specific python version (eg: 3.11), run the following command from the project root: ```sh -make test-e2e +tox -e py311 -- e2e ``` ## Contributing @@ -207,8 +219,8 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio ## Trademarks -This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft -trademarks or logos is subject to and must follow +This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft +trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies. 
diff --git a/pyproject.toml b/pyproject.toml index 9bcf1bb..575bc4a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,16 +17,16 @@ keywords = [ "workflow" ] classifiers = [ - "Development Status :: 3 - Alpha", - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", + "Development Status :: 3 - Alpha", + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", ] requires-python = ">=3.9" license = {file = "LICENSE"} readme = "README.md" dependencies = [ "grpcio", - "protobuf>=6.31.1,<7.0.0", # follows grpcio generation version https://github.com/grpc/grpc/blob/v1.74.0/tools/distrib/python/grpcio_tools/setup.py + "protobuf>=6.31.1,<7.0.0", # follows grpcio generation version https://github.com/grpc/grpc/blob/v1.75.1/tools/distrib/python/grpcio_tools/setup.py "asyncio" ] @@ -48,3 +48,37 @@ pythonpath = ["."] markers = [ "e2e: mark a test as an end-to-end test that requires a running sidecar" ] + +[project.optional-dependencies] +dev = [ + "pytest", + "pytest-asyncio>=0.23", + "tox>=4.0.0", + "pytest-cov", + "ruff", + + # grpc gen + "grpcio-tools==1.75.1", +] + +[tool.ruff] +target-version = "py310" # TODO: update to py310 when we drop support for py39 +line-length = 100 +extend-exclude = [".github", "durabletask/internal/orchestrator_service_*.*"] + +[tool.ruff.lint] +select = [ + "I", # isort + "W", # pycodestyle warnings + "F", # pyflakes + + # TODO: Add those back progressively as we fix the issues + # "E", # pycodestyle errors + # "C", # flake8-comprehensions + # "B", # flake8-bugbear + # "UP", # pyupgrade +] + +[tool.ruff.format] +# follow upstream quote-style instead of dapr/python-sdk to reduce diff +quote-style = "double" diff --git a/requirements.txt b/requirements.txt index 06750e2..7b288f0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1 @@ -autopep8 -grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible -protobuf 
-asyncio -pytest -pytest-cov -pytest-asyncio -flake8 \ No newline at end of file +# requirements in pyproject.toml diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..e035797 --- /dev/null +++ b/tox.ini @@ -0,0 +1,34 @@ +[tox] +skipsdist = True +minversion = 3.10.0 +envlist = + py{39,310,311,312,313,314} + ruff, + mypy, +# TODO: switch runner to uv (tox-uv plugin) +runner = virtualenv + +[testenv] +# you can run tox with the e2e pytest marker using tox factors: +# tox -e py39,py310,py311,py312,py313,py314 -- e2e +# or single one with: +# tox -e py310-e2e +# to use custom grpc endpoint and not capture print statements (-s arg in pytest): +# DAPR_GRPC_ENDPOINT=localhost:12345 tox -e py310-e2e -- -s +setenv = + PYTHONDONTWRITEBYTECODE=1 +deps = .[dev] +commands = + !e2e: pytest -m "not e2e" --verbose + e2e: pytest -m e2e --verbose +commands_pre = + pip3 install -e {toxinidir}/ +allowlist_externals = pip3 +pass_env = DAPR_GRPC_ENDPOINT,DAPR_HTTP_ENDPOINT,DAPR_RUNTIME_HOST,DAPR_GRPC_PORT,DAPR_HTTP_PORT + +[testenv:ruff] +basepython = python3 +usedevelop = False +commands = + ruff check --fix + ruff format From 29369d95e593bd00beac718afa482e007a3cb33d Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Thu, 30 Oct 2025 23:11:18 +0100 Subject: [PATCH 48/81] run `tox -e ruff` Signed-off-by: Albert Callarisa --- durabletask/__init__.py | 1 - durabletask/aio/client.py | 110 +++-- durabletask/aio/internal/grpc_interceptor.py | 24 +- durabletask/aio/internal/shared.py | 23 +- durabletask/client.py | 112 ++++-- durabletask/internal/grpc_interceptor.py | 34 +- durabletask/internal/helpers.py | 92 +++-- .../internal/orchestrator_service_pb2_grpc.py | 2 +- durabletask/internal/shared.py | 21 +- durabletask/task.py | 104 +++-- durabletask/worker.py | 245 +++++------ examples/activity_sequence.py | 13 +- examples/fanout_fanin.py | 17 +- examples/human_interaction.py | 9 +- tests/durabletask/test_activity_executor.py | 11 +- tests/durabletask/test_client.py | 30 +- 
tests/durabletask/test_client_async.py | 46 ++- tests/durabletask/test_concurrency_options.py | 8 +- tests/durabletask/test_orchestration_e2e.py | 44 +- .../test_orchestration_e2e_async.py | 36 +- .../test_orchestration_executor.py | 380 ++++++++++++------ tests/durabletask/test_orchestration_wait.py | 25 +- .../test_worker_concurrency_loop.py | 43 +- .../test_worker_concurrency_loop_async.py | 42 +- 24 files changed, 850 insertions(+), 622 deletions(-) diff --git a/durabletask/__init__.py b/durabletask/__init__.py index a37823c..78ea7ca 100644 --- a/durabletask/__init__.py +++ b/durabletask/__init__.py @@ -3,5 +3,4 @@ """Durable Task SDK for Python""" - PACKAGE_NAME = "durabletask" diff --git a/durabletask/aio/client.py b/durabletask/aio/client.py index 4ec9bbf..0f0286c 100644 --- a/durabletask/aio/client.py +++ b/durabletask/aio/client.py @@ -13,22 +13,29 @@ import durabletask.internal.orchestrator_service_pb2 as pb import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared -from durabletask.aio.internal.shared import get_grpc_aio_channel, ClientInterceptor from durabletask import task -from durabletask.client import OrchestrationState, OrchestrationStatus, new_orchestration_state, TInput, TOutput from durabletask.aio.internal.grpc_interceptor import DefaultClientInterceptorImpl +from durabletask.aio.internal.shared import ClientInterceptor, get_grpc_aio_channel +from durabletask.client import ( + OrchestrationState, + OrchestrationStatus, + TInput, + TOutput, + new_orchestration_state, +) class AsyncTaskHubGrpcClient: - - def __init__(self, *, - host_address: Optional[str] = None, - metadata: Optional[list[tuple[str, str]]] = None, - log_handler: Optional[logging.Handler] = None, - log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False, - interceptors: Optional[Sequence[ClientInterceptor]] = None): - + def __init__( + self, + *, + host_address: Optional[str] = None, + metadata: 
Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None, + ): if interceptors is not None: interceptors = list(interceptors) if metadata is not None: @@ -39,9 +46,7 @@ def __init__(self, *, interceptors = None channel = get_grpc_aio_channel( - host_address=host_address, - secure_channel=secure_channel, - interceptors=interceptors + host_address=host_address, secure_channel=secure_channel, interceptors=interceptors ) self._channel = channel self._stub = stubs.TaskHubSidecarServiceStub(channel) @@ -57,18 +62,23 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): await self.aclose() return False - async def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, - input: Optional[TInput] = None, - instance_id: Optional[str] = None, - start_at: Optional[datetime] = None, - reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None) -> str: - + async def schedule_new_orchestration( + self, + orchestrator: Union[task.Orchestrator[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + start_at: Optional[datetime] = None, + reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None, + ) -> str: name = orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) req = pb.CreateInstanceRequest( name=name, instanceId=instance_id if instance_id else uuid.uuid4().hex, - input=wrappers_pb2.StringValue(value=shared.to_json(input)) if input is not None else None, + input=wrappers_pb2.StringValue(value=shared.to_json(input)) + if input is not None + else None, scheduledStartTimestamp=helpers.new_timestamp(start_at) if start_at else None, version=helpers.get_string_value(None), orchestrationIdReusePolicy=reuse_id_policy, @@ -78,20 +88,25 @@ async def schedule_new_orchestration(self, 
orchestrator: Union[task.Orchestrator res: pb.CreateInstanceResponse = await self._stub.StartInstance(req) return res.instanceId - async def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> Optional[OrchestrationState]: + async def get_orchestration_state( + self, instance_id: str, *, fetch_payloads: bool = True + ) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) res: pb.GetInstanceResponse = await self._stub.GetInstance(req) return new_orchestration_state(req.instanceId, res) - async def wait_for_orchestration_start(self, instance_id: str, *, - fetch_payloads: bool = False, - timeout: int = 0) -> Optional[OrchestrationState]: + async def wait_for_orchestration_start( + self, instance_id: str, *, fetch_payloads: bool = False, timeout: int = 0 + ) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: grpc_timeout = None if timeout == 0 else timeout self._logger.info( - f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to start.") - res: pb.GetInstanceResponse = await self._stub.WaitForInstanceStart(req, timeout=grpc_timeout) + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to start." 
+ ) + res: pb.GetInstanceResponse = await self._stub.WaitForInstanceStart( + req, timeout=grpc_timeout + ) return new_orchestration_state(req.instanceId, res) except grpc.RpcError as rpc_error: if rpc_error.code() == grpc.StatusCode.DEADLINE_EXCEEDED: # type: ignore @@ -100,22 +115,30 @@ async def wait_for_orchestration_start(self, instance_id: str, *, else: raise - async def wait_for_orchestration_completion(self, instance_id: str, *, - fetch_payloads: bool = True, - timeout: int = 0) -> Optional[OrchestrationState]: + async def wait_for_orchestration_completion( + self, instance_id: str, *, fetch_payloads: bool = True, timeout: int = 0 + ) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: grpc_timeout = None if timeout == 0 else timeout self._logger.info( - f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to complete.") - res: pb.GetInstanceResponse = await self._stub.WaitForInstanceCompletion(req, timeout=grpc_timeout) + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to complete." 
+ ) + res: pb.GetInstanceResponse = await self._stub.WaitForInstanceCompletion( + req, timeout=grpc_timeout + ) state = new_orchestration_state(req.instanceId, res) if not state: return None - if state.runtime_status == OrchestrationStatus.FAILED and state.failure_details is not None: + if ( + state.runtime_status == OrchestrationStatus.FAILED + and state.failure_details is not None + ): details = state.failure_details - self._logger.info(f"Instance '{instance_id}' failed: [{details.error_type}] {details.message}") + self._logger.info( + f"Instance '{instance_id}' failed: [{details.error_type}] {details.message}" + ) elif state.runtime_status == OrchestrationStatus.TERMINATED: self._logger.info(f"Instance '{instance_id}' was terminated.") elif state.runtime_status == OrchestrationStatus.COMPLETED: @@ -130,26 +153,25 @@ async def wait_for_orchestration_completion(self, instance_id: str, *, raise async def raise_orchestration_event( - self, - instance_id: str, - event_name: str, - *, - data: Optional[Any] = None): + self, instance_id: str, event_name: str, *, data: Optional[Any] = None + ): req = pb.RaiseEventRequest( instanceId=instance_id, name=event_name, - input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data else None) + input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data else None, + ) self._logger.info(f"Raising event '{event_name}' for instance '{instance_id}'.") await self._stub.RaiseEvent(req) - async def terminate_orchestration(self, instance_id: str, *, - output: Optional[Any] = None, - recursive: bool = True): + async def terminate_orchestration( + self, instance_id: str, *, output: Optional[Any] = None, recursive: bool = True + ): req = pb.TerminateRequest( instanceId=instance_id, output=wrappers_pb2.StringValue(value=shared.to_json(output)) if output else None, - recursive=recursive) + recursive=recursive, + ) self._logger.info(f"Terminating instance '{instance_id}'.") await self._stub.TerminateInstance(req) diff --git 
a/durabletask/aio/internal/grpc_interceptor.py b/durabletask/aio/internal/grpc_interceptor.py index bf1ac98..4c90ab1 100644 --- a/durabletask/aio/internal/grpc_interceptor.py +++ b/durabletask/aio/internal/grpc_interceptor.py @@ -7,23 +7,30 @@ class _ClientCallDetails( - namedtuple( - '_ClientCallDetails', - ['method', 'timeout', 'metadata', 'credentials', 'wait_for_ready', 'compression']), - grpc_aio.ClientCallDetails): + namedtuple( + "_ClientCallDetails", + ["method", "timeout", "metadata", "credentials", "wait_for_ready", "compression"], + ), + grpc_aio.ClientCallDetails, +): pass class DefaultClientInterceptorImpl( - grpc_aio.UnaryUnaryClientInterceptor, grpc_aio.UnaryStreamClientInterceptor, - grpc_aio.StreamUnaryClientInterceptor, grpc_aio.StreamStreamClientInterceptor): + grpc_aio.UnaryUnaryClientInterceptor, + grpc_aio.UnaryStreamClientInterceptor, + grpc_aio.StreamUnaryClientInterceptor, + grpc_aio.StreamStreamClientInterceptor, +): """Async gRPC client interceptor to add metadata to all calls.""" def __init__(self, metadata: list[tuple[str, str]]): super().__init__() self._metadata = metadata - def _intercept_call(self, client_call_details: _ClientCallDetails) -> grpc_aio.ClientCallDetails: + def _intercept_call( + self, client_call_details: _ClientCallDetails + ) -> grpc_aio.ClientCallDetails: if self._metadata is None: return client_call_details @@ -39,7 +46,8 @@ def _intercept_call(self, client_call_details: _ClientCallDetails) -> grpc_aio.C metadata, client_call_details.credentials, client_call_details.wait_for_ready, - client_call_details.compression) + client_call_details.compression, + ) async def intercept_unary_unary(self, continuation, client_call_details, request): new_client_call_details = self._intercept_call(client_call_details) diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py index 6bdb256..65d4066 100644 --- a/durabletask/aio/internal/shared.py +++ b/durabletask/aio/internal/shared.py @@ -7,42 +7,43 @@ 
from grpc import aio as grpc_aio from durabletask.internal.shared import ( - get_default_host_address, - SECURE_PROTOCOLS, INSECURE_PROTOCOLS, + SECURE_PROTOCOLS, + get_default_host_address, ) - ClientInterceptor = Union[ grpc_aio.UnaryUnaryClientInterceptor, grpc_aio.UnaryStreamClientInterceptor, grpc_aio.StreamUnaryClientInterceptor, - grpc_aio.StreamStreamClientInterceptor + grpc_aio.StreamStreamClientInterceptor, ] def get_grpc_aio_channel( - host_address: Optional[str], - secure_channel: bool = False, - interceptors: Optional[Sequence[ClientInterceptor]] = None) -> grpc_aio.Channel: - + host_address: Optional[str], + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None, +) -> grpc_aio.Channel: if host_address is None: host_address = get_default_host_address() for protocol in SECURE_PROTOCOLS: if host_address.lower().startswith(protocol): secure_channel = True - host_address = host_address[len(protocol):] + host_address = host_address[len(protocol) :] break for protocol in INSECURE_PROTOCOLS: if host_address.lower().startswith(protocol): secure_channel = False - host_address = host_address[len(protocol):] + host_address = host_address[len(protocol) :] break if secure_channel: - channel = grpc_aio.secure_channel(host_address, grpc.ssl_channel_credentials(), interceptors=interceptors) + channel = grpc_aio.secure_channel( + host_address, grpc.ssl_channel_credentials(), interceptors=interceptors + ) else: channel = grpc_aio.insecure_channel(host_address, interceptors=interceptors) diff --git a/durabletask/client.py b/durabletask/client.py index b155bd6..79475ec 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -18,12 +18,13 @@ from durabletask import task from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl -TInput = TypeVar('TInput') -TOutput = TypeVar('TOutput') +TInput = TypeVar("TInput") +TOutput = TypeVar("TOutput") class OrchestrationStatus(Enum): """The status of an orchestration 
instance.""" + RUNNING = pb.ORCHESTRATION_STATUS_RUNNING COMPLETED = pb.ORCHESTRATION_STATUS_COMPLETED FAILED = pb.ORCHESTRATION_STATUS_FAILED @@ -52,7 +53,8 @@ def raise_if_failed(self): if self.failure_details is not None: raise OrchestrationFailedError( f"Orchestration '{self.instance_id}' failed: {self.failure_details.message}", - self.failure_details) + self.failure_details, + ) class OrchestrationFailedError(Exception): @@ -65,18 +67,23 @@ def failure_details(self): return self._failure_details -def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Optional[OrchestrationState]: +def new_orchestration_state( + instance_id: str, res: pb.GetInstanceResponse +) -> Optional[OrchestrationState]: if not res.exists: return None state = res.orchestrationState failure_details = None - if state.failureDetails.errorMessage != '' or state.failureDetails.errorType != '': + if state.failureDetails.errorMessage != "" or state.failureDetails.errorType != "": failure_details = task.FailureDetails( state.failureDetails.errorMessage, state.failureDetails.errorType, - state.failureDetails.stackTrace.value if not helpers.is_empty(state.failureDetails.stackTrace) else None) + state.failureDetails.stackTrace.value + if not helpers.is_empty(state.failureDetails.stackTrace) + else None, + ) return OrchestrationState( instance_id, @@ -87,19 +94,21 @@ def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Op state.input.value if not helpers.is_empty(state.input) else None, state.output.value if not helpers.is_empty(state.output) else None, state.customStatus.value if not helpers.is_empty(state.customStatus) else None, - failure_details) + failure_details, + ) class TaskHubGrpcClient: - - def __init__(self, *, - host_address: Optional[str] = None, - metadata: Optional[list[tuple[str, str]]] = None, - log_handler: Optional[logging.Handler] = None, - log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False, - 
interceptors: Optional[Sequence[shared.ClientInterceptor]] = None): - + def __init__( + self, + *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, + ): # If the caller provided metadata, we need to create a new interceptor for it and # add it to the list of interceptors. if interceptors is not None: @@ -112,19 +121,20 @@ def __init__(self, *, interceptors = None channel = shared.get_grpc_channel( - host_address=host_address, - secure_channel=secure_channel, - interceptors=interceptors + host_address=host_address, secure_channel=secure_channel, interceptors=interceptors ) self._stub = stubs.TaskHubSidecarServiceStub(channel) self._logger = shared.get_logger("client", log_handler, log_formatter) - def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, - input: Optional[TInput] = None, - instance_id: Optional[str] = None, - start_at: Optional[datetime] = None, - reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None) -> str: - + def schedule_new_orchestration( + self, + orchestrator: Union[task.Orchestrator[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + start_at: Optional[datetime] = None, + reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None, + ) -> str: name = orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) input_pb = ( @@ -144,19 +154,22 @@ def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInpu res: pb.CreateInstanceResponse = self._stub.StartInstance(req) return res.instanceId - def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> Optional[OrchestrationState]: + def get_orchestration_state( + self, instance_id: 
str, *, fetch_payloads: bool = True + ) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) res: pb.GetInstanceResponse = self._stub.GetInstance(req) return new_orchestration_state(req.instanceId, res) - def wait_for_orchestration_start(self, instance_id: str, *, - fetch_payloads: bool = False, - timeout: int = 0) -> Optional[OrchestrationState]: + def wait_for_orchestration_start( + self, instance_id: str, *, fetch_payloads: bool = False, timeout: int = 0 + ) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: grpc_timeout = None if timeout == 0 else timeout self._logger.info( - f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to start.") + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to start." + ) res: pb.GetInstanceResponse = self._stub.WaitForInstanceStart(req, timeout=grpc_timeout) return new_orchestration_state(req.instanceId, res) except grpc.RpcError as rpc_error: @@ -166,22 +179,30 @@ def wait_for_orchestration_start(self, instance_id: str, *, else: raise - def wait_for_orchestration_completion(self, instance_id: str, *, - fetch_payloads: bool = True, - timeout: int = 0) -> Optional[OrchestrationState]: + def wait_for_orchestration_completion( + self, instance_id: str, *, fetch_payloads: bool = True, timeout: int = 0 + ) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: grpc_timeout = None if timeout == 0 else timeout self._logger.info( - f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to complete.") - res: pb.GetInstanceResponse = self._stub.WaitForInstanceCompletion(req, timeout=grpc_timeout) + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to 
complete." + ) + res: pb.GetInstanceResponse = self._stub.WaitForInstanceCompletion( + req, timeout=grpc_timeout + ) state = new_orchestration_state(req.instanceId, res) if not state: return None - if state.runtime_status == OrchestrationStatus.FAILED and state.failure_details is not None: + if ( + state.runtime_status == OrchestrationStatus.FAILED + and state.failure_details is not None + ): details = state.failure_details - self._logger.info(f"Instance '{instance_id}' failed: [{details.error_type}] {details.message}") + self._logger.info( + f"Instance '{instance_id}' failed: [{details.error_type}] {details.message}" + ) elif state.runtime_status == OrchestrationStatus.TERMINATED: self._logger.info(f"Instance '{instance_id}' was terminated.") elif state.runtime_status == OrchestrationStatus.COMPLETED: @@ -195,23 +216,26 @@ def wait_for_orchestration_completion(self, instance_id: str, *, else: raise - def raise_orchestration_event(self, instance_id: str, event_name: str, *, - data: Optional[Any] = None): + def raise_orchestration_event( + self, instance_id: str, event_name: str, *, data: Optional[Any] = None + ): req = pb.RaiseEventRequest( instanceId=instance_id, name=event_name, - input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data else None) + input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data else None, + ) self._logger.info(f"Raising event '{event_name}' for instance '{instance_id}'.") self._stub.RaiseEvent(req) - def terminate_orchestration(self, instance_id: str, *, - output: Optional[Any] = None, - recursive: bool = True): + def terminate_orchestration( + self, instance_id: str, *, output: Optional[Any] = None, recursive: bool = True + ): req = pb.TerminateRequest( instanceId=instance_id, output=wrappers_pb2.StringValue(value=shared.to_json(output)) if output else None, - recursive=recursive) + recursive=recursive, + ) self._logger.info(f"Terminating instance '{instance_id}'.") self._stub.TerminateInstance(req) diff --git 
a/durabletask/internal/grpc_interceptor.py b/durabletask/internal/grpc_interceptor.py index 69db3c5..f9e8fb5 100644 --- a/durabletask/internal/grpc_interceptor.py +++ b/durabletask/internal/grpc_interceptor.py @@ -7,20 +7,26 @@ class _ClientCallDetails( - namedtuple( - '_ClientCallDetails', - ['method', 'timeout', 'metadata', 'credentials', 'wait_for_ready', 'compression']), - grpc.ClientCallDetails): + namedtuple( + "_ClientCallDetails", + ["method", "timeout", "metadata", "credentials", "wait_for_ready", "compression"], + ), + grpc.ClientCallDetails, +): """This is an implementation of the ClientCallDetails interface needed for interceptors. This class takes six named values and inherits the ClientCallDetails from grpc package. This class encloses the values that describe a RPC to be invoked. """ + pass -class DefaultClientInterceptorImpl ( - grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor, - grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor): +class DefaultClientInterceptorImpl( + grpc.UnaryUnaryClientInterceptor, + grpc.UnaryStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, + grpc.StreamStreamClientInterceptor, +): """The class implements a UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an interceptor to add additional headers to all calls as needed.""" @@ -29,10 +35,9 @@ def __init__(self, metadata: list[tuple[str, str]]): super().__init__() self._metadata = metadata - def _intercept_call( - self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: + def _intercept_call(self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: """Internal intercept_call implementation which adds metadata to grpc metadata in the RPC - call details.""" + call details.""" if self._metadata is None: return client_call_details @@ -43,8 +48,13 @@ def _intercept_call( metadata.extend(self._metadata) 
client_call_details = _ClientCallDetails( - client_call_details.method, client_call_details.timeout, metadata, - client_call_details.credentials, client_call_details.wait_for_ready, client_call_details.compression) + client_call_details.method, + client_call_details.timeout, + metadata, + client_call_details.credentials, + client_call_details.wait_for_ready, + client_call_details.compression, + ) return client_call_details diff --git a/durabletask/internal/helpers.py b/durabletask/internal/helpers.py index 682ab89..8b67219 100644 --- a/durabletask/internal/helpers.py +++ b/durabletask/internal/helpers.py @@ -16,17 +16,23 @@ def new_orchestrator_started_event(timestamp: Optional[datetime] = None) -> pb.H ts = timestamp_pb2.Timestamp() if timestamp is not None: ts.FromDatetime(timestamp) - return pb.HistoryEvent(eventId=-1, timestamp=ts, orchestratorStarted=pb.OrchestratorStartedEvent()) + return pb.HistoryEvent( + eventId=-1, timestamp=ts, orchestratorStarted=pb.OrchestratorStartedEvent() + ) -def new_execution_started_event(name: str, instance_id: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: +def new_execution_started_event( + name: str, instance_id: str, encoded_input: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), executionStarted=pb.ExecutionStartedEvent( name=name, input=get_string_value(encoded_input), - orchestrationInstance=pb.OrchestrationInstance(instanceId=instance_id))) + orchestrationInstance=pb.OrchestrationInstance(instanceId=instance_id), + ), + ) def new_timer_created_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent: @@ -35,7 +41,7 @@ def new_timer_created_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent return pb.HistoryEvent( eventId=timer_id, timestamp=timestamp_pb2.Timestamp(), - timerCreated=pb.TimerCreatedEvent(fireAt=ts) + timerCreated=pb.TimerCreatedEvent(fireAt=ts), ) @@ -45,23 +51,29 @@ def new_timer_fired_event(timer_id: int, 
fire_at: datetime) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), - timerFired=pb.TimerFiredEvent(fireAt=ts, timerId=timer_id) + timerFired=pb.TimerFiredEvent(fireAt=ts, timerId=timer_id), ) -def new_task_scheduled_event(event_id: int, name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: +def new_task_scheduled_event( + event_id: int, name: str, encoded_input: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), - taskScheduled=pb.TaskScheduledEvent(name=name, input=get_string_value(encoded_input)) + taskScheduled=pb.TaskScheduledEvent(name=name, input=get_string_value(encoded_input)), ) -def new_task_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: +def new_task_completed_event( + event_id: int, encoded_output: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), - taskCompleted=pb.TaskCompletedEvent(taskScheduledId=event_id, result=get_string_value(encoded_output)) + taskCompleted=pb.TaskCompletedEvent( + taskScheduledId=event_id, result=get_string_value(encoded_output) + ), ) @@ -69,32 +81,33 @@ def new_task_failed_event(event_id: int, ex: Exception) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), - taskFailed=pb.TaskFailedEvent(taskScheduledId=event_id, failureDetails=new_failure_details(ex)) + taskFailed=pb.TaskFailedEvent( + taskScheduledId=event_id, failureDetails=new_failure_details(ex) + ), ) def new_sub_orchestration_created_event( - event_id: int, - name: str, - instance_id: str, - encoded_input: Optional[str] = None) -> pb.HistoryEvent: + event_id: int, name: str, instance_id: str, encoded_input: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), 
subOrchestrationInstanceCreated=pb.SubOrchestrationInstanceCreatedEvent( - name=name, - input=get_string_value(encoded_input), - instanceId=instance_id) + name=name, input=get_string_value(encoded_input), instanceId=instance_id + ), ) -def new_sub_orchestration_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: +def new_sub_orchestration_completed_event( + event_id: int, encoded_output: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), subOrchestrationInstanceCompleted=pb.SubOrchestrationInstanceCompletedEvent( - result=get_string_value(encoded_output), - taskScheduledId=event_id) + result=get_string_value(encoded_output), taskScheduledId=event_id + ), ) @@ -103,8 +116,8 @@ def new_sub_orchestration_failed_event(event_id: int, ex: Exception) -> pb.Histo eventId=-1, timestamp=timestamp_pb2.Timestamp(), subOrchestrationInstanceFailed=pb.SubOrchestrationInstanceFailedEvent( - failureDetails=new_failure_details(ex), - taskScheduledId=event_id) + failureDetails=new_failure_details(ex), taskScheduledId=event_id + ), ) @@ -112,7 +125,7 @@ def new_failure_details(ex: Exception) -> pb.TaskFailureDetails: return pb.TaskFailureDetails( errorType=type(ex).__name__, errorMessage=str(ex), - stackTrace=wrappers_pb2.StringValue(value=''.join(traceback.format_tb(ex.__traceback__))) + stackTrace=wrappers_pb2.StringValue(value="".join(traceback.format_tb(ex.__traceback__))), ) @@ -120,7 +133,7 @@ def new_event_raised_event(name: str, encoded_input: Optional[str] = None) -> pb return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), - eventRaised=pb.EventRaisedEvent(name=name, input=get_string_value(encoded_input)) + eventRaised=pb.EventRaisedEvent(name=name, input=get_string_value(encoded_input)), ) @@ -128,15 +141,13 @@ def new_suspend_event() -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), - 
executionSuspended=pb.ExecutionSuspendedEvent() + executionSuspended=pb.ExecutionSuspendedEvent(), ) def new_resume_event() -> pb.HistoryEvent: return pb.HistoryEvent( - eventId=-1, - timestamp=timestamp_pb2.Timestamp(), - executionResumed=pb.ExecutionResumedEvent() + eventId=-1, timestamp=timestamp_pb2.Timestamp(), executionResumed=pb.ExecutionResumedEvent() ) @@ -144,9 +155,7 @@ def new_terminated_event(*, encoded_output: Optional[str] = None) -> pb.HistoryE return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), - executionTerminated=pb.ExecutionTerminatedEvent( - input=get_string_value(encoded_output) - ) + executionTerminated=pb.ExecutionTerminatedEvent(input=get_string_value(encoded_output)), ) @@ -185,7 +194,9 @@ def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction return pb.OrchestratorAction(id=id, createTimer=pb.CreateTimerAction(fireAt=timestamp)) -def new_schedule_task_action(id: int, name: str, encoded_input: Optional[str], router: Optional[pb.TaskRouter] = None) -> pb.OrchestratorAction: +def new_schedule_task_action( + id: int, name: str, encoded_input: Optional[str], router: Optional[pb.TaskRouter] = None +) -> pb.OrchestratorAction: return pb.OrchestratorAction( id=id, scheduleTask=pb.ScheduleTaskAction( @@ -204,11 +215,12 @@ def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp: def new_create_sub_orchestration_action( - id: int, - name: str, - instance_id: Optional[str], - encoded_input: Optional[str], - router: Optional[pb.TaskRouter] = None) -> pb.OrchestratorAction: + id: int, + name: str, + instance_id: Optional[str], + encoded_input: Optional[str], + router: Optional[pb.TaskRouter] = None, +) -> pb.OrchestratorAction: return pb.OrchestratorAction( id=id, createSubOrchestration=pb.CreateSubOrchestrationAction( @@ -222,13 +234,13 @@ def new_create_sub_orchestration_action( def is_empty(v: wrappers_pb2.StringValue): - return v is None or v.value == '' + return v is None or v.value == "" def 
get_orchestration_status_str(status: pb.OrchestrationStatus): try: const_name = pb.OrchestrationStatus.Name(status) - if const_name.startswith('ORCHESTRATION_STATUS_'): - return const_name[len('ORCHESTRATION_STATUS_'):] + if const_name.startswith("ORCHESTRATION_STATUS_"): + return const_name[len("ORCHESTRATION_STATUS_") :] except Exception: return "UNKNOWN" diff --git a/durabletask/internal/orchestrator_service_pb2_grpc.py b/durabletask/internal/orchestrator_service_pb2_grpc.py index 7c12e1b..3342f3a 100644 --- a/durabletask/internal/orchestrator_service_pb2_grpc.py +++ b/durabletask/internal/orchestrator_service_pb2_grpc.py @@ -6,7 +6,7 @@ from durabletask.internal import orchestrator_service_pb2 as durabletask_dot_internal_dot_orchestrator__service__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -GRPC_GENERATED_VERSION = '1.74.0' +GRPC_GENERATED_VERSION = '1.75.1' GRPC_VERSION = grpc.__version__ _version_not_supported = False diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index d15141f..461f2e2 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -51,9 +51,10 @@ def get_default_host_address() -> str: def get_grpc_channel( - host_address: Optional[str], - secure_channel: bool = False, - interceptors: Optional[Sequence[ClientInterceptor]] = None) -> grpc.Channel: + host_address: Optional[str], + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None, +) -> grpc.Channel: if host_address is None: host_address = get_default_host_address() @@ -61,14 +62,14 @@ def get_grpc_channel( if host_address.lower().startswith(protocol): secure_channel = True # remove the protocol from the host name - host_address = host_address[len(protocol):] + host_address = host_address[len(protocol) :] break for protocol in INSECURE_PROTOCOLS: if host_address.lower().startswith(protocol): secure_channel = False # remove the protocol from the host name - 
host_address = host_address[len(protocol):] + host_address = host_address[len(protocol) :] break # Create the base channel @@ -84,9 +85,10 @@ def get_grpc_channel( def get_logger( - name_suffix: str, - log_handler: Optional[logging.Handler] = None, - log_formatter: Optional[logging.Formatter] = None) -> logging.Logger: + name_suffix: str, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, +) -> logging.Logger: logger = logging.Logger(f"durabletask-{name_suffix}") # Add a default log handler if none is provided @@ -99,7 +101,8 @@ def get_logger( if log_formatter is None: log_formatter = logging.Formatter( fmt="%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s", - datefmt='%Y-%m-%d %H:%M:%S') + datefmt="%Y-%m-%d %H:%M:%S", + ) log_handler.setFormatter(log_formatter) return logger diff --git a/durabletask/task.py b/durabletask/task.py index 5210c99..2650bfd 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -12,13 +12,12 @@ import durabletask.internal.helpers as pbh import durabletask.internal.orchestrator_service_pb2 as pb -T = TypeVar('T') -TInput = TypeVar('TInput') -TOutput = TypeVar('TOutput') +T = TypeVar("T") +TInput = TypeVar("TInput") +TOutput = TypeVar("TOutput") class OrchestrationContext(ABC): - @property @abstractmethod def instance_id(self) -> str: @@ -98,10 +97,14 @@ def create_timer(self, fire_at: Union[datetime, timedelta]) -> Task: pass @abstractmethod - def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *, - input: Optional[TInput] = None, - retry_policy: Optional[RetryPolicy] = None, - app_id: Optional[str] = None) -> Task[TOutput]: + def call_activity( + self, + activity: Union[Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[RetryPolicy] = None, + app_id: Optional[str] = None, + ) -> Task[TOutput]: """Schedule an activity for execution. 
Parameters @@ -123,11 +126,15 @@ def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *, pass @abstractmethod - def call_sub_orchestrator(self, orchestrator: Orchestrator[TInput, TOutput], *, - input: Optional[TInput] = None, - instance_id: Optional[str] = None, - retry_policy: Optional[RetryPolicy] = None, - app_id: Optional[str] = None) -> Task[TOutput]: + def call_sub_orchestrator( + self, + orchestrator: Orchestrator[TInput, TOutput], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + retry_policy: Optional[RetryPolicy] = None, + app_id: Optional[str] = None, + ) -> Task[TOutput]: """Schedule sub-orchestrator function for execution. Parameters @@ -210,7 +217,8 @@ def __init__(self, message: str, details: pb.TaskFailureDetails): self._details = FailureDetails( details.errorMessage, details.errorType, - details.stackTrace.value if not pbh.is_empty(details.stackTrace) else None) + details.stackTrace.value if not pbh.is_empty(details.stackTrace) else None, + ) @property def details(self) -> FailureDetails: @@ -227,6 +235,7 @@ class OrchestrationStateError(Exception): class Task(ABC, Generic[T]): """Abstract base class for asynchronous tasks in a durable orchestration.""" + _result: T _exception: Optional[TaskFailedError] _parent: Optional[CompositeTask[T]] @@ -250,7 +259,7 @@ def is_failed(self) -> bool: def get_result(self) -> T: """Returns the result of the task.""" if not self._is_complete: - raise ValueError('The task has not completed.') + raise ValueError("The task has not completed.") elif self._exception is not None: raise self._exception return self._result @@ -258,12 +267,13 @@ def get_result(self) -> T: def get_exception(self) -> TaskFailedError: """Returns the exception that caused the task to fail.""" if self._exception is None: - raise ValueError('The task has not failed.') + raise ValueError("The task has not failed.") return self._exception class CompositeTask(Task[T]): """A task that is composed of 
other tasks.""" + _tasks: list[Task] def __init__(self, tasks: list[Task]): @@ -303,7 +313,7 @@ def pending_tasks(self) -> int: def on_child_completed(self, task: Task[T]): if self.is_complete: - raise ValueError('The task has already completed.') + raise ValueError("The task has already completed.") self._completed_tasks += 1 if task.is_failed and self._exception is None: self._exception = task.get_exception() @@ -318,14 +328,13 @@ def get_completed_tasks(self) -> int: class CompletableTask(Task[T]): - def __init__(self): super().__init__() self._retryable_parent = None def complete(self, result: T): if self._is_complete: - raise ValueError('The task has already completed.') + raise ValueError("The task has already completed.") self._result = result self._is_complete = True if self._parent is not None: @@ -333,7 +342,7 @@ def complete(self, result: T): def fail(self, message: str, details: pb.TaskFailureDetails): if self._is_complete: - raise ValueError('The task has already completed.') + raise ValueError("The task has already completed.") self._exception = TaskFailedError(message, details) self._is_complete = True if self._parent is not None: @@ -343,8 +352,13 @@ def fail(self, message: str, details: pb.TaskFailureDetails): class RetryableTask(CompletableTask[T]): """A task that can be retried according to a retry policy.""" - def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, - start_time: datetime, is_sub_orch: bool) -> None: + def __init__( + self, + retry_policy: RetryPolicy, + action: pb.OrchestratorAction, + start_time: datetime, + is_sub_orch: bool, + ) -> None: super().__init__() self._action = action self._retry_policy = retry_policy @@ -360,7 +374,10 @@ def compute_next_delay(self) -> Optional[timedelta]: return None retry_expiration: datetime = datetime.max - if self._retry_policy.retry_timeout is not None and self._retry_policy.retry_timeout != datetime.max: + if ( + self._retry_policy.retry_timeout is not None + and 
self._retry_policy.retry_timeout != datetime.max + ): retry_expiration = self._start_time + self._retry_policy.retry_timeout if self._retry_policy.backoff_coefficient is None: @@ -369,17 +386,21 @@ def compute_next_delay(self) -> Optional[timedelta]: backoff_coefficient = self._retry_policy.backoff_coefficient if datetime.utcnow() < retry_expiration: - next_delay_f = math.pow(backoff_coefficient, self._attempt_count - 1) * self._retry_policy.first_retry_interval.total_seconds() + next_delay_f = ( + math.pow(backoff_coefficient, self._attempt_count - 1) + * self._retry_policy.first_retry_interval.total_seconds() + ) if self._retry_policy.max_retry_interval is not None: - next_delay_f = min(next_delay_f, self._retry_policy.max_retry_interval.total_seconds()) + next_delay_f = min( + next_delay_f, self._retry_policy.max_retry_interval.total_seconds() + ) return timedelta(seconds=next_delay_f) return None class TimerTask(CompletableTask[T]): - def __init__(self) -> None: super().__init__() @@ -457,12 +478,15 @@ def task_id(self) -> int: class RetryPolicy: """Represents the retry policy for an orchestration or activity function.""" - def __init__(self, *, - first_retry_interval: timedelta, - max_number_of_attempts: int, - backoff_coefficient: Optional[float] = 1.0, - max_retry_interval: Optional[timedelta] = None, - retry_timeout: Optional[timedelta] = None): + def __init__( + self, + *, + first_retry_interval: timedelta, + max_number_of_attempts: int, + backoff_coefficient: Optional[float] = 1.0, + max_retry_interval: Optional[timedelta] = None, + retry_timeout: Optional[timedelta] = None, + ): """Creates a new RetryPolicy instance. 
Parameters @@ -480,15 +504,15 @@ def __init__(self, *, """ # validate inputs if first_retry_interval < timedelta(seconds=0): - raise ValueError('first_retry_interval must be >= 0') + raise ValueError("first_retry_interval must be >= 0") if max_number_of_attempts < 1: - raise ValueError('max_number_of_attempts must be >= 1') + raise ValueError("max_number_of_attempts must be >= 1") if backoff_coefficient is not None and backoff_coefficient < 1: - raise ValueError('backoff_coefficient must be >= 1') + raise ValueError("backoff_coefficient must be >= 1") if max_retry_interval is not None and max_retry_interval < timedelta(seconds=0): - raise ValueError('max_retry_interval must be >= 0') + raise ValueError("max_retry_interval must be >= 0") if retry_timeout is not None and retry_timeout < timedelta(seconds=0): - raise ValueError('retry_timeout must be >= 0') + raise ValueError("retry_timeout must be >= 0") self._first_retry_interval = first_retry_interval self._max_number_of_attempts = max_number_of_attempts @@ -525,7 +549,9 @@ def retry_timeout(self) -> Optional[timedelta]: def get_name(fn: Callable) -> str: """Returns the name of the provided function""" name = fn.__name__ - if name == '': - raise ValueError('Cannot infer a name from a lambda function. Please provide a name explicitly.') + if name == "": + raise ValueError( + "Cannot infer a name from a lambda function. Please provide a name explicitly." 
+ ) return name diff --git a/durabletask/worker.py b/durabletask/worker.py index 695dc44..2d057e1 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -34,10 +34,10 @@ class ConcurrencyOptions: """ def __init__( - self, - maximum_concurrent_activity_work_items: Optional[int] = None, - maximum_concurrent_orchestration_work_items: Optional[int] = None, - maximum_thread_pool_workers: Optional[int] = None, + self, + maximum_concurrent_activity_work_items: Optional[int] = None, + maximum_concurrent_orchestration_work_items: Optional[int] = None, + maximum_thread_pool_workers: Optional[int] = None, ): """Initialize concurrency options. @@ -214,20 +214,18 @@ class TaskHubGrpcWorker: _interceptors: Optional[list[shared.ClientInterceptor]] = None def __init__( - self, - *, - host_address: Optional[str] = None, - metadata: Optional[list[tuple[str, str]]] = None, - log_handler=None, - log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False, - interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, - concurrency_options: Optional[ConcurrencyOptions] = None, + self, + *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler=None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, + concurrency_options: Optional[ConcurrencyOptions] = None, ): self._registry = _Registry() - self._host_address = ( - host_address if host_address else shared.get_default_host_address() - ) + self._host_address = host_address if host_address else shared.get_default_host_address() self._logger = shared.get_logger("worker", log_handler, log_formatter) self._shutdown = Event() self._is_running = False @@ -235,9 +233,7 @@ def __init__( # Use provided concurrency options or create default ones self._concurrency_options = ( - concurrency_options - if concurrency_options is not None - else 
ConcurrencyOptions() + concurrency_options if concurrency_options is not None else ConcurrencyOptions() ) # Determine the interceptors to use @@ -266,17 +262,13 @@ def __exit__(self, type, value, traceback): def add_orchestrator(self, fn: task.Orchestrator) -> str: """Registers an orchestrator function with the worker.""" if self._is_running: - raise RuntimeError( - "Orchestrators cannot be added while the worker is running." - ) + raise RuntimeError("Orchestrators cannot be added while the worker is running.") return self._registry.add_orchestrator(fn) def add_activity(self, fn: task.Activity) -> str: """Registers an activity function with the worker.""" if self._is_running: - raise RuntimeError( - "Activities cannot be added while the worker is running." - ) + raise RuntimeError("Activities cannot be added while the worker is running.") return self._registry.add_activity(fn) def start(self): @@ -413,9 +405,7 @@ def stream_reader(): loop = asyncio.get_running_loop() while not self._shutdown.is_set(): try: - work_item = await loop.run_in_executor( - None, work_item_queue.get - ) + work_item = await loop.run_in_executor(None, work_item_queue.get) if isinstance(work_item, Exception): raise work_item request_type = work_item.WhichOneof("request") @@ -437,9 +427,7 @@ def stream_reader(): elif work_item.HasField("healthPing"): pass else: - self._logger.warning( - f"Unexpected work item type: {request_type}" - ) + self._logger.warning(f"Unexpected work item type: {request_type}") except Exception as e: self._logger.warning(f"Error in work item stream: {e}") raise e @@ -457,7 +445,10 @@ def stream_reader(): break elif error_code == grpc.StatusCode.UNAVAILABLE: # Check if this is a connection timeout scenario - if "Timeout occurred" in error_details or "Failed to connect to remote host" in error_details: + if ( + "Timeout occurred" in error_details + or "Failed to connect to remote host" in error_details + ): self._logger.warning( f"Connection timeout to 
{self._host_address}: {error_details} - will retry with fresh connection" ) @@ -499,10 +490,10 @@ def stop(self): self._is_running = False def _execute_orchestrator( - self, - req: pb.OrchestratorRequest, - stub: stubs.TaskHubSidecarServiceStub, - completionToken, + self, + req: pb.OrchestratorRequest, + stub: stubs.TaskHubSidecarServiceStub, + completionToken, ): try: executor = _OrchestrationExecutor(self._registry, self._logger) @@ -537,17 +528,15 @@ def _execute_orchestrator( ) def _execute_activity( - self, - req: pb.ActivityRequest, - stub: stubs.TaskHubSidecarServiceStub, - completionToken, + self, + req: pb.ActivityRequest, + stub: stubs.TaskHubSidecarServiceStub, + completionToken, ): instance_id = req.orchestrationInstance.instanceId try: executor = _ActivityExecutor(self._registry, self._logger) - result = executor.execute( - instance_id, req.name, req.taskId, req.input.value - ) + result = executor.execute(instance_id, req.name, req.taskId, req.input.value) res = pb.ActivityResponse( instanceId=instance_id, taskId=req.taskId, @@ -626,10 +615,10 @@ def resume(self): self._previous_task = next_task def set_complete( - self, - result: Any, - status: pb.OrchestrationStatus, - is_result_encoded: bool = False, + self, + result: Any, + status: pb.OrchestrationStatus, + is_result_encoded: bool = False, ): if self._is_complete: return @@ -687,9 +676,7 @@ def get_actions(self) -> list[pb.OrchestratorAction]: # replayed when the new instance starts. 
for event_name, values in self._received_events.items(): for event_value in values: - encoded_value = ( - shared.to_json(event_value) if event_value else None - ) + encoded_value = shared.to_json(event_value) if event_value else None carryover_events.append( ph.new_event_raised_event(event_name, encoded_value) ) @@ -738,9 +725,9 @@ def create_timer(self, fire_at: Union[datetime, timedelta]) -> task.Task: return self.create_timer_internal(fire_at) def create_timer_internal( - self, - fire_at: Union[datetime, timedelta], - retryable_task: Optional[task.RetryableTask] = None, + self, + fire_at: Union[datetime, timedelta], + retryable_task: Optional[task.RetryableTask] = None, ) -> task.Task: id = self.next_sequence_number() if isinstance(fire_at, timedelta): @@ -755,12 +742,12 @@ def create_timer_internal( return timer_task def call_activity( - self, - activity: Union[task.Activity[TInput, TOutput], str], - *, - input: Optional[TInput] = None, - retry_policy: Optional[task.RetryPolicy] = None, - app_id: Optional[str] = None, + self, + activity: Union[task.Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[task.RetryPolicy] = None, + app_id: Optional[str] = None, ) -> task.Task[TOutput]: id = self.next_sequence_number() @@ -770,13 +757,13 @@ def call_activity( return self._pending_tasks.get(id, task.CompletableTask()) def call_sub_orchestrator( - self, - orchestrator: Union[task.Orchestrator[TInput, TOutput], str], - *, - input: Optional[TInput] = None, - instance_id: Optional[str] = None, - retry_policy: Optional[task.RetryPolicy] = None, - app_id: Optional[str] = None, + self, + orchestrator: Union[task.Orchestrator[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + retry_policy: Optional[task.RetryPolicy] = None, + app_id: Optional[str] = None, ) -> task.Task[TOutput]: id = self.next_sequence_number() if isinstance(orchestrator, str): @@ -795,16 +782,16 @@ def 
call_sub_orchestrator( return self._pending_tasks.get(id, task.CompletableTask()) def call_activity_function_helper( - self, - id: Optional[int], - activity_function: Union[task.Activity[TInput, TOutput], str], - *, - input: Optional[TInput] = None, - retry_policy: Optional[task.RetryPolicy] = None, - is_sub_orch: bool = False, - instance_id: Optional[str] = None, - fn_task: Optional[task.CompletableTask[TOutput]] = None, - app_id: Optional[str] = None, + self, + id: Optional[int], + activity_function: Union[task.Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[task.RetryPolicy] = None, + is_sub_orch: bool = False, + instance_id: Optional[str] = None, + fn_task: Optional[task.CompletableTask[TOutput]] = None, + app_id: Optional[str] = None, ): if id is None: id = self.next_sequence_number() @@ -883,9 +870,7 @@ class ExecutionResults: actions: list[pb.OrchestratorAction] encoded_custom_status: Optional[str] - def __init__( - self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str] - ): + def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str]): self.actions = actions self.encoded_custom_status = encoded_custom_status @@ -900,10 +885,10 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._suspended_events: list[pb.HistoryEvent] = [] def execute( - self, - instance_id: str, - old_events: Sequence[pb.HistoryEvent], - new_events: Sequence[pb.HistoryEvent], + self, + instance_id: str, + old_events: Sequence[pb.HistoryEvent], + new_events: Sequence[pb.HistoryEvent], ) -> ExecutionResults: if not new_events: raise task.OrchestrationStateError( @@ -941,28 +926,22 @@ def execute( f"{instance_id}: Orchestrator yielded with {task_count} task(s) and {event_count} event(s) outstanding." 
) elif ( - ctx._completion_status and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW + ctx._completion_status + and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW ): - completion_status_str = ph.get_orchestration_status_str( - ctx._completion_status - ) + completion_status_str = ph.get_orchestration_status_str(ctx._completion_status) self._logger.info( f"{instance_id}: Orchestration completed with status: {completion_status_str}" ) actions = ctx.get_actions() if self._logger.level <= logging.DEBUG: - self._logger.debug( f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}" ) - return ExecutionResults( - actions=actions, encoded_custom_status=ctx._encoded_custom_status - ) + return ExecutionResults(actions=actions, encoded_custom_status=ctx._encoded_custom_status) - def process_event( - self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent - ) -> None: + def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent) -> None: if self._is_suspended and _is_suspendable(event): # We are suspended, so we need to buffer this event until we are resumed self._suspended_events.append(event) @@ -988,13 +967,12 @@ def process_event( # deserialize the input, if any input = None if ( - event.executionStarted.input is not None and event.executionStarted.input.value != "" + event.executionStarted.input is not None + and event.executionStarted.input.value != "" ): input = shared.from_json(event.executionStarted.input.value) - result = fn( - ctx, input - ) # this does not execute the generator, only creates it + result = fn(ctx, input) # this does not execute the generator, only creates it if isinstance(result, GeneratorType): # Start the orchestrator's generator function ctx.run(result) @@ -1007,14 +985,10 @@ def process_event( timer_id = event.eventId action = ctx._pending_actions.pop(timer_id, None) if not action: - raise _get_non_determinism_error( - timer_id, 
task.get_name(ctx.create_timer) - ) + raise _get_non_determinism_error(timer_id, task.get_name(ctx.create_timer)) elif not action.HasField("createTimer"): expected_method_name = task.get_name(ctx.create_timer) - raise _get_wrong_action_type_error( - timer_id, expected_method_name, action - ) + raise _get_wrong_action_type_error(timer_id, expected_method_name, action) elif event.HasField("timerFired"): timer_id = event.timerFired.timerId timer_task = ctx._pending_tasks.pop(timer_id, None) @@ -1059,14 +1033,10 @@ def process_event( action = ctx._pending_actions.pop(task_id, None) activity_task = ctx._pending_tasks.get(task_id, None) if not action: - raise _get_non_determinism_error( - task_id, task.get_name(ctx.call_activity) - ) + raise _get_non_determinism_error(task_id, task.get_name(ctx.call_activity)) elif not action.HasField("scheduleTask"): expected_method_name = task.get_name(ctx.call_activity) - raise _get_wrong_action_type_error( - task_id, expected_method_name, action - ) + raise _get_wrong_action_type_error(task_id, expected_method_name, action) elif action.scheduleTask.name != event.taskScheduled.name: raise _get_wrong_action_name_error( task_id, @@ -1132,11 +1102,9 @@ def process_event( ) elif not action.HasField("createSubOrchestration"): expected_method_name = task.get_name(ctx.call_sub_orchestrator) - raise _get_wrong_action_type_error( - task_id, expected_method_name, action - ) + raise _get_wrong_action_type_error(task_id, expected_method_name, action) elif ( - action.createSubOrchestration.name != event.subOrchestrationInstanceCreated.name + action.createSubOrchestration.name != event.subOrchestrationInstanceCreated.name ): raise _get_wrong_action_name_error( task_id, @@ -1156,9 +1124,7 @@ def process_event( return result = None if not ph.is_empty(event.subOrchestrationInstanceCompleted.result): - result = shared.from_json( - event.subOrchestrationInstanceCompleted.result.value - ) + result = 
shared.from_json(event.subOrchestrationInstanceCompleted.result.value) sub_orch_task.complete(result) ctx.resume() elif event.HasField("subOrchestrationInstanceFailed"): @@ -1260,16 +1226,14 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._logger = logger def execute( - self, - orchestration_id: str, - name: str, - task_id: int, - encoded_input: Optional[str], + self, + orchestration_id: str, + name: str, + task_id: int, + encoded_input: Optional[str], ) -> Optional[str]: """Executes an activity function and returns the serialized result, if any.""" - self._logger.debug( - f"{orchestration_id}/{task_id}: Executing activity '{name}'..." - ) + self._logger.debug(f"{orchestration_id}/{task_id}: Executing activity '{name}'...") fn = self._registry.get_activity(name) if not fn: raise ActivityNotRegisteredError( @@ -1282,9 +1246,7 @@ def execute( # Execute the activity function activity_output = fn(ctx, activity_input) - encoded_output = ( - shared.to_json(activity_output) if activity_output is not None else None - ) + encoded_output = shared.to_json(activity_output) if activity_output is not None else None chars = len(encoded_output) if encoded_output else 0 self._logger.debug( f"{orchestration_id}/{task_id}: Activity '{name}' completed successfully with {chars} char(s) of encoded output." @@ -1292,9 +1254,7 @@ def execute( return encoded_output -def _get_non_determinism_error( - task_id: int, action_name: str -) -> task.NonDeterminismError: +def _get_non_determinism_error(task_id: int, action_name: str) -> task.NonDeterminismError: return task.NonDeterminismError( f"A previous execution called {action_name} with ID={task_id}, but the current " f"execution doesn't have this action with this ID. 
This problem occurs when either " @@ -1304,7 +1264,7 @@ def _get_non_determinism_error( def _get_wrong_action_type_error( - task_id: int, expected_method_name: str, action: pb.OrchestratorAction + task_id: int, expected_method_name: str, action: pb.OrchestratorAction ) -> task.NonDeterminismError: unexpected_method_name = _get_method_name_for_action(action) return task.NonDeterminismError( @@ -1317,7 +1277,7 @@ def _get_wrong_action_type_error( def _get_wrong_action_name_error( - task_id: int, method_name: str, expected_task_name: str, actual_task_name: str + task_id: int, method_name: str, expected_task_name: str, actual_task_name: str ) -> task.NonDeterminismError: return task.NonDeterminismError( f"Failed to restore orchestration state due to a history mismatch: A previous execution called " @@ -1425,9 +1385,7 @@ def _ensure_queues_for_current_loop(self): if self.orchestration_queue is not None: try: while not self.orchestration_queue.empty(): - existing_orchestration_items.append( - self.orchestration_queue.get_nowait() - ) + existing_orchestration_items.append(self.orchestration_queue.get_nowait()) except Exception: pass @@ -1471,9 +1429,7 @@ async def run(self): if self.activity_queue is not None and self.orchestration_queue is not None: await asyncio.gather( self._consume_queue(self.activity_queue, self.activity_semaphore), - self._consume_queue( - self.orchestration_queue, self.orchestration_semaphore - ), + self._consume_queue(self.orchestration_queue, self.orchestration_semaphore), ) async def _consume_queue(self, queue: asyncio.Queue, semaphore: asyncio.Semaphore): @@ -1502,7 +1458,7 @@ async def _consume_queue(self, queue: asyncio.Queue, semaphore: asyncio.Semaphor running_tasks.add(task) async def _process_work_item( - self, semaphore: asyncio.Semaphore, queue: asyncio.Queue, func, args, kwargs + self, semaphore: asyncio.Semaphore, queue: asyncio.Queue, func, args, kwargs ): async with semaphore: try: @@ -1517,13 +1473,12 @@ async def _run_func(self, 
func, *args, **kwargs): loop = asyncio.get_running_loop() # Avoid submitting to executor after shutdown if ( - getattr(self, "_shutdown", False) and getattr(self, "thread_pool", None) and getattr( - self.thread_pool, "_shutdown", False) + getattr(self, "_shutdown", False) + and getattr(self, "thread_pool", None) + and getattr(self.thread_pool, "_shutdown", False) ): return None - return await loop.run_in_executor( - self.thread_pool, lambda: func(*args, **kwargs) - ) + return await loop.run_in_executor(self.thread_pool, lambda: func(*args, **kwargs)) def submit_activity(self, func, *args, **kwargs): work_item = (func, args, kwargs) diff --git a/examples/activity_sequence.py b/examples/activity_sequence.py index 066a733..fa88363 100644 --- a/examples/activity_sequence.py +++ b/examples/activity_sequence.py @@ -1,19 +1,20 @@ """End-to-end sample that demonstrates how to configure an orchestrator that calls an activity function in a sequence and prints the outputs.""" + from durabletask import client, task, worker def hello(ctx: task.ActivityContext, name: str) -> str: """Activity function that returns a greeting""" - return f'Hello {name}!' + return f"Hello {name}!" 
def sequence(ctx: task.OrchestrationContext, _): """Orchestrator function that calls the 'hello' activity function in a sequence""" # call "hello" activity function in a sequence - result1 = yield ctx.call_activity(hello, input='Tokyo') - result2 = yield ctx.call_activity(hello, input='Seattle') - result3 = yield ctx.call_activity(hello, input='London') + result1 = yield ctx.call_activity(hello, input="Tokyo") + result2 = yield ctx.call_activity(hello, input="Seattle") + result3 = yield ctx.call_activity(hello, input="London") # return an array of results return [result1, result2, result3] @@ -30,6 +31,6 @@ def sequence(ctx: task.OrchestrationContext, _): instance_id = c.schedule_new_orchestration(sequence) state = c.wait_for_orchestration_completion(instance_id, timeout=10) if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: - print(f'Orchestration completed! Result: {state.serialized_output}') + print(f"Orchestration completed! Result: {state.serialized_output}") elif state: - print(f'Orchestration failed: {state.failure_details}') + print(f"Orchestration failed: {state.failure_details}") diff --git a/examples/fanout_fanin.py b/examples/fanout_fanin.py index c53744f..30339b7 100644 --- a/examples/fanout_fanin.py +++ b/examples/fanout_fanin.py @@ -1,6 +1,7 @@ """End-to-end sample that demonstrates how to configure an orchestrator that a dynamic number activity functions in parallel, waits for them all to complete, and prints an aggregate summary of the outputs.""" + import random import time @@ -11,13 +12,13 @@ def get_work_items(ctx: task.ActivityContext, _) -> list[str]: """Activity function that returns a list of work items""" # return a random number of work items count = random.randint(2, 10) - print(f'generating {count} work items...') - return [f'work item {i}' for i in range(count)] + print(f"generating {count} work items...") + return [f"work item {i}" for i in range(count)] def process_work_item(ctx: task.ActivityContext, item: 
str) -> int: """Activity function that returns a result for a given work item""" - print(f'processing work item: {item}') + print(f"processing work item: {item}") # simulate some work that takes a variable amount of time time.sleep(random.random() * 5) @@ -39,9 +40,9 @@ def orchestrator(ctx: task.OrchestrationContext, _): # return an aggregate summary of the results return { - 'work_items': work_items, - 'results': results, - 'total': sum(results), + "work_items": work_items, + "results": results, + "total": sum(results), } @@ -57,6 +58,6 @@ def orchestrator(ctx: task.OrchestrationContext, _): instance_id = c.schedule_new_orchestration(orchestrator) state = c.wait_for_orchestration_completion(instance_id, timeout=30) if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: - print(f'Orchestration completed! Result: {state.serialized_output}') + print(f"Orchestration completed! Result: {state.serialized_output}") elif state: - print(f'Orchestration failed: {state.failure_details}') + print(f"Orchestration failed: {state.failure_details}") diff --git a/examples/human_interaction.py b/examples/human_interaction.py index 2a01897..9773055 100644 --- a/examples/human_interaction.py +++ b/examples/human_interaction.py @@ -15,23 +15,24 @@ @dataclass class Order: """Represents a purchase order""" + Cost: float Product: str Quantity: int def __str__(self): - return f'{self.Product} ({self.Quantity})' + return f"{self.Product} ({self.Quantity})" def send_approval_request(_: task.ActivityContext, order: Order) -> None: """Activity function that sends an approval request to the manager""" time.sleep(5) - print(f'*** Sending approval request for order: {order}') + print(f"*** Sending approval request for order: {order}") def place_order(_: task.ActivityContext, order: Order) -> None: """Activity function that places an order""" - print(f'*** Placing order: {order}') + print(f"*** Placing order: {order}") def purchase_order_workflow(ctx: 
task.OrchestrationContext, order: Order): @@ -92,7 +93,7 @@ def prompt_for_approval(): if not state: print("Workflow not found!") # not expected elif state.runtime_status == client.OrchestrationStatus.COMPLETED: - print(f'Orchestration completed! Result: {state.serialized_output}') + print(f"Orchestration completed! Result: {state.serialized_output}") else: state.raise_if_failed() # raises an exception except TimeoutError: diff --git a/tests/durabletask/test_activity_executor.py b/tests/durabletask/test_activity_executor.py index bfc8eaf..996ae44 100644 --- a/tests/durabletask/test_activity_executor.py +++ b/tests/durabletask/test_activity_executor.py @@ -8,16 +8,18 @@ from durabletask import task, worker logging.basicConfig( - format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - level=logging.DEBUG) + format="%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + level=logging.DEBUG, +) TEST_LOGGER = logging.getLogger("tests") -TEST_INSTANCE_ID = 'abc123' +TEST_INSTANCE_ID = "abc123" TEST_TASK_ID = 42 def test_activity_inputs(): """Validates activity function input population""" + def test_activity(ctx: task.ActivityContext, test_input: Any): # return all activity inputs back as the output return test_input, ctx.orchestration_id, ctx.task_id @@ -34,7 +36,6 @@ def test_activity(ctx: task.ActivityContext, test_input: Any): def test_activity_not_registered(): - def test_activity(ctx: task.ActivityContext, _): pass # not used diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py index e750134..d55e0e0 100644 --- a/tests/durabletask/test_client.py +++ b/tests/durabletask/test_client.py @@ -1,36 +1,39 @@ from unittest.mock import ANY, patch from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl -from durabletask.internal.shared import (get_default_host_address, - get_grpc_channel) +from durabletask.internal.shared import 
get_default_host_address, get_grpc_channel -HOST_ADDRESS = 'localhost:50051' -METADATA = [('key1', 'value1'), ('key2', 'value2')] +HOST_ADDRESS = "localhost:50051" +METADATA = [("key1", "value1"), ("key2", "value2")] INTERCEPTORS = [DefaultClientInterceptorImpl(METADATA)] def test_get_grpc_channel_insecure(): - with patch('grpc.insecure_channel') as mock_channel: + with patch("grpc.insecure_channel") as mock_channel: get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS) def test_get_grpc_channel_secure(): - with patch('grpc.secure_channel') as mock_channel, patch( - 'grpc.ssl_channel_credentials') as mock_credentials: + with ( + patch("grpc.secure_channel") as mock_channel, + patch("grpc.ssl_channel_credentials") as mock_credentials, + ): get_grpc_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS, mock_credentials.return_value) def test_get_grpc_channel_default_host_address(): - with patch('grpc.insecure_channel') as mock_channel: + with patch("grpc.insecure_channel") as mock_channel: get_grpc_channel(None, False, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(get_default_host_address()) def test_get_grpc_channel_with_metadata(): - with patch('grpc.insecure_channel') as mock_channel, patch( - 'grpc.intercept_channel') as mock_intercept_channel: + with ( + patch("grpc.insecure_channel") as mock_channel, + patch("grpc.intercept_channel") as mock_intercept_channel, + ): get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) mock_channel.assert_called_once_with(HOST_ADDRESS) mock_intercept_channel.assert_called_once() @@ -43,9 +46,10 @@ def test_get_grpc_channel_with_metadata(): def test_grpc_channel_with_host_name_protocol_stripping(): - with patch('grpc.insecure_channel') as mock_insecure_channel, patch( - 'grpc.secure_channel') as mock_secure_channel: - + with ( + patch("grpc.insecure_channel") as mock_insecure_channel, + 
patch("grpc.secure_channel") as mock_secure_channel, + ): host_name = "myserver.com:1234" prefix = "grpc://" diff --git a/tests/durabletask/test_client_async.py b/tests/durabletask/test_client_async.py index 8f2b83e..0588ff1 100644 --- a/tests/durabletask/test_client_async.py +++ b/tests/durabletask/test_client_async.py @@ -3,54 +3,60 @@ from unittest.mock import ANY, patch +from durabletask.aio.client import AsyncTaskHubGrpcClient from durabletask.aio.internal.grpc_interceptor import DefaultClientInterceptorImpl -from durabletask.internal.shared import get_default_host_address from durabletask.aio.internal.shared import get_grpc_aio_channel -from durabletask.aio.client import AsyncTaskHubGrpcClient - +from durabletask.internal.shared import get_default_host_address -HOST_ADDRESS = 'localhost:50051' -METADATA = [('key1', 'value1'), ('key2', 'value2')] +HOST_ADDRESS = "localhost:50051" +METADATA = [("key1", "value1"), ("key2", "value2")] INTERCEPTORS_AIO = [DefaultClientInterceptorImpl(METADATA)] def test_get_grpc_aio_channel_insecure(): - with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: get_grpc_aio_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS_AIO) mock_channel.assert_called_once_with(HOST_ADDRESS, interceptors=INTERCEPTORS_AIO) def test_get_grpc_aio_channel_secure(): - with patch('durabletask.aio.internal.shared.grpc_aio.secure_channel') as mock_channel, patch( - 'grpc.ssl_channel_credentials') as mock_credentials: + with ( + patch("durabletask.aio.internal.shared.grpc_aio.secure_channel") as mock_channel, + patch("grpc.ssl_channel_credentials") as mock_credentials, + ): get_grpc_aio_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS_AIO) - mock_channel.assert_called_once_with(HOST_ADDRESS, mock_credentials.return_value, interceptors=INTERCEPTORS_AIO) + mock_channel.assert_called_once_with( + HOST_ADDRESS, 
mock_credentials.return_value, interceptors=INTERCEPTORS_AIO + ) def test_get_grpc_aio_channel_default_host_address(): - with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: get_grpc_aio_channel(None, False, interceptors=INTERCEPTORS_AIO) - mock_channel.assert_called_once_with(get_default_host_address(), interceptors=INTERCEPTORS_AIO) + mock_channel.assert_called_once_with( + get_default_host_address(), interceptors=INTERCEPTORS_AIO + ) def test_get_grpc_aio_channel_with_interceptors(): - with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: get_grpc_aio_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS_AIO) mock_channel.assert_called_once_with(HOST_ADDRESS, interceptors=INTERCEPTORS_AIO) # Capture and check the arguments passed to insecure_channel() args, kwargs = mock_channel.call_args assert args[0] == HOST_ADDRESS - assert 'interceptors' in kwargs - interceptors = kwargs['interceptors'] + assert "interceptors" in kwargs + interceptors = kwargs["interceptors"] assert isinstance(interceptors[0], DefaultClientInterceptorImpl) assert interceptors[0]._metadata == METADATA def test_grpc_aio_channel_with_host_name_protocol_stripping(): - with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_insecure_channel, patch( - 'durabletask.aio.internal.shared.grpc_aio.secure_channel') as mock_secure_channel: - + with ( + patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_insecure_channel, + patch("durabletask.aio.internal.shared.grpc_aio.secure_channel") as mock_secure_channel, + ): host_name = "myserver.com:1234" prefix = "grpc://" @@ -95,12 +101,12 @@ def test_grpc_aio_channel_with_host_name_protocol_stripping(): def test_async_client_construct_with_metadata(): - with 
patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: AsyncTaskHubGrpcClient(host_address=HOST_ADDRESS, metadata=METADATA) # Ensure channel created with an interceptor that has the expected metadata args, kwargs = mock_channel.call_args assert args[0] == HOST_ADDRESS - assert 'interceptors' in kwargs - interceptors = kwargs['interceptors'] + assert "interceptors" in kwargs + interceptors = kwargs["interceptors"] assert isinstance(interceptors[0], DefaultClientInterceptorImpl) assert interceptors[0]._metadata == METADATA diff --git a/tests/durabletask/test_concurrency_options.py b/tests/durabletask/test_concurrency_options.py index b49b7ec..a923383 100644 --- a/tests/durabletask/test_concurrency_options.py +++ b/tests/durabletask/test_concurrency_options.py @@ -37,9 +37,7 @@ def test_partial_custom_options(): expected_default = 100 * processor_count expected_workers = processor_count + 4 - options = ConcurrencyOptions( - maximum_concurrent_activity_work_items=30 - ) + options = ConcurrencyOptions(maximum_concurrent_activity_work_items=30) assert options.maximum_concurrent_activity_work_items == 30 assert options.maximum_concurrent_orchestration_work_items == expected_default @@ -67,9 +65,7 @@ def test_worker_default_options(): expected_default = 100 * processor_count expected_workers = processor_count + 4 - assert ( - worker.concurrency_options.maximum_concurrent_activity_work_items == expected_default - ) + assert worker.concurrency_options.maximum_concurrent_activity_work_items == expected_default assert ( worker.concurrency_options.maximum_concurrent_orchestration_work_items == expected_default ) diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index f5651ff..3bd394d 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -16,7 +16,6 @@ 
def test_empty_orchestration(): - invoked = False def empty_orchestrator(ctx: task.OrchestrationContext, _): @@ -44,7 +43,6 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): def test_activity_sequence(): - def plus_one(_: task.ActivityContext, input: int) -> int: return input + 1 @@ -64,8 +62,7 @@ def sequence(ctx: task.OrchestrationContext, start_val: int): task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(sequence, input=1) - state = task_hub_client.wait_for_orchestration_completion( - id, timeout=30) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.name == task.get_name(sequence) @@ -78,7 +75,6 @@ def sequence(ctx: task.OrchestrationContext, start_val: int): def test_activity_error_handling(): - def throw(_: task.ActivityContext, input: int) -> int: raise RuntimeError("Kah-BOOOOM!!!") @@ -139,8 +135,7 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): # Fan out to multiple sub-orchestrations tasks = [] for _ in range(count): - tasks.append(ctx.call_sub_orchestrator( - orchestrator_child, input=3)) + tasks.append(ctx.call_sub_orchestrator(orchestrator_child, input=3)) # Wait for all sub-orchestrations to complete yield task.when_all(tasks) @@ -163,9 +158,9 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): def test_wait_for_multiple_external_events(): def orchestrator(ctx: task.OrchestrationContext, _): - a = yield ctx.wait_for_external_event('A') - b = yield ctx.wait_for_external_event('B') - c = yield ctx.wait_for_external_event('C') + a = yield ctx.wait_for_external_event("A") + b = yield ctx.wait_for_external_event("B") + c = yield ctx.wait_for_external_event("C") return [a, b, c] # Start a worker, which will connect to the sidecar in a background thread @@ -176,20 +171,20 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Start the orchestration and immediately raise events to it. 
task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(orchestrator) - task_hub_client.raise_orchestration_event(id, 'A', data='a') - task_hub_client.raise_orchestration_event(id, 'B', data='b') - task_hub_client.raise_orchestration_event(id, 'C', data='c') + task_hub_client.raise_orchestration_event(id, "A", data="a") + task_hub_client.raise_orchestration_event(id, "B", data="b") + task_hub_client.raise_orchestration_event(id, "C", data="c") state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.serialized_output == json.dumps(['a', 'b', 'c']) + assert state.serialized_output == json.dumps(["a", "b", "c"]) @pytest.mark.parametrize("raise_event", [True, False]) def test_wait_for_external_event_timeout(raise_event: bool): def orchestrator(ctx: task.OrchestrationContext, _): - approval: task.Task[bool] = ctx.wait_for_external_event('Approval') + approval: task.Task[bool] = ctx.wait_for_external_event("Approval") timeout = ctx.create_timer(timedelta(seconds=3)) winner = yield task.when_any([approval, timeout]) if winner == approval: @@ -206,7 +201,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(orchestrator) if raise_event: - task_hub_client.raise_orchestration_event(id, 'Approval') + task_hub_client.raise_orchestration_event(id, "Approval") state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None @@ -325,9 +320,13 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): time.sleep(delay_time) if recurse: - assert activity_counter == 0, "Activity should not have executed with recursive termination" + assert activity_counter == 0, ( + "Activity should not have executed with recursive termination" + ) else: - assert activity_counter == 5, "Activity 
should have executed without recursive termination" + assert activity_counter == 5, ( + "Activity should have executed without recursive termination" + ) def test_continue_as_new(): @@ -425,7 +424,8 @@ def test_retry_policies(): max_number_of_attempts=3, backoff_coefficient=1, max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=30)) + retry_timeout=timedelta(seconds=30), + ) def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _): yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy) @@ -474,7 +474,8 @@ def test_retry_timeout(): max_number_of_attempts=5, backoff_coefficient=2, max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=14)) + retry_timeout=timedelta(seconds=14), + ) def mock_orchestrator(ctx: task.OrchestrationContext, _): yield ctx.call_activity(throw_activity, retry_policy=retry_policy) @@ -502,7 +503,6 @@ def throw_activity(ctx: task.ActivityContext, _): def test_custom_status(): - def empty_orchestrator(ctx: task.OrchestrationContext, _): ctx.set_custom_status("foobaz") @@ -522,4 +522,4 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): assert state.runtime_status == client.OrchestrationStatus.COMPLETED assert state.serialized_input is None assert state.serialized_output is None - assert state.serialized_custom_status == "\"foobaz\"" + assert state.serialized_custom_status == '"foobaz"' diff --git a/tests/durabletask/test_orchestration_e2e_async.py b/tests/durabletask/test_orchestration_e2e_async.py index de586f1..2e34603 100644 --- a/tests/durabletask/test_orchestration_e2e_async.py +++ b/tests/durabletask/test_orchestration_e2e_async.py @@ -8,10 +8,9 @@ import pytest +from durabletask import task, worker from durabletask.aio.client import AsyncTaskHubGrpcClient from durabletask.client import OrchestrationStatus -from durabletask import task, worker - # NOTE: These tests assume a sidecar process is running. 
Example command: # go install github.com/microsoft/durabletask-go@main @@ -20,7 +19,6 @@ async def test_empty_orchestration(): - invoked = False def empty_orchestrator(ctx: task.OrchestrationContext, _): @@ -49,7 +47,6 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): async def test_activity_sequence(): - def plus_one(_: task.ActivityContext, input: int) -> int: return input + 1 @@ -83,7 +80,6 @@ def sequence(ctx: task.OrchestrationContext, start_val: int): async def test_activity_error_handling(): - def throw(_: task.ActivityContext, input: int) -> int: raise RuntimeError("Kah-BOOOOM!!!") @@ -145,8 +141,7 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): # Fan out to multiple sub-orchestrations tasks = [] for _ in range(count): - tasks.append(ctx.call_sub_orchestrator( - orchestrator_child, input=3)) + tasks.append(ctx.call_sub_orchestrator(orchestrator_child, input=3)) # Wait for all sub-orchestrations to complete yield task.when_all(tasks) @@ -170,9 +165,9 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): async def test_wait_for_multiple_external_events(): def orchestrator(ctx: task.OrchestrationContext, _): - a = yield ctx.wait_for_external_event('A') - b = yield ctx.wait_for_external_event('B') - c = yield ctx.wait_for_external_event('C') + a = yield ctx.wait_for_external_event("A") + b = yield ctx.wait_for_external_event("B") + c = yield ctx.wait_for_external_event("C") return [a, b, c] # Start a worker, which will connect to the sidecar in a background thread @@ -183,21 +178,21 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Start the orchestration and immediately raise events to it. 
client = AsyncTaskHubGrpcClient() id = await client.schedule_new_orchestration(orchestrator) - await client.raise_orchestration_event(id, 'A', data='a') - await client.raise_orchestration_event(id, 'B', data='b') - await client.raise_orchestration_event(id, 'C', data='c') + await client.raise_orchestration_event(id, "A", data="a") + await client.raise_orchestration_event(id, "B", data="b") + await client.raise_orchestration_event(id, "C", data="c") state = await client.wait_for_orchestration_completion(id, timeout=30) await client.aclose() assert state is not None assert state.runtime_status == OrchestrationStatus.COMPLETED - assert state.serialized_output == json.dumps(['a', 'b', 'c']) + assert state.serialized_output == json.dumps(["a", "b", "c"]) @pytest.mark.parametrize("raise_event", [True, False]) async def test_wait_for_external_event_timeout(raise_event: bool): def orchestrator(ctx: task.OrchestrationContext, _): - approval: task.Task[bool] = ctx.wait_for_external_event('Approval') + approval: task.Task[bool] = ctx.wait_for_external_event("Approval") timeout = ctx.create_timer(timedelta(seconds=3)) winner = yield task.when_any([approval, timeout]) if winner == approval: @@ -214,7 +209,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): client = AsyncTaskHubGrpcClient() id = await client.schedule_new_orchestration(orchestrator) if raise_event: - await client.raise_orchestration_event(id, 'Approval') + await client.raise_orchestration_event(id, "Approval") state = await client.wait_for_orchestration_completion(id, timeout=30) await client.aclose() @@ -380,7 +375,8 @@ async def test_retry_policies(): max_number_of_attempts=3, backoff_coefficient=1, max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=30)) + retry_timeout=timedelta(seconds=30), + ) def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _): yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy) @@ -429,7 +425,8 @@ async 
def test_retry_timeout(): max_number_of_attempts=5, backoff_coefficient=2, max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=14)) + retry_timeout=timedelta(seconds=14), + ) def mock_orchestrator(ctx: task.OrchestrationContext, _): yield ctx.call_activity(throw_activity, retry_policy=retry_policy) @@ -457,7 +454,6 @@ def throw_activity(ctx: task.ActivityContext, _): async def test_custom_status(): - def empty_orchestrator(ctx: task.OrchestrationContext, _): ctx.set_custom_status("foobaz") @@ -477,4 +473,4 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): assert state.runtime_status == OrchestrationStatus.COMPLETED assert state.serialized_input is None assert state.serialized_output is None - assert state.serialized_custom_status == "\"foobaz\"" + assert state.serialized_custom_status == '"foobaz"' diff --git a/tests/durabletask/test_orchestration_executor.py b/tests/durabletask/test_orchestration_executor.py index c784135..964512f 100644 --- a/tests/durabletask/test_orchestration_executor.py +++ b/tests/durabletask/test_orchestration_executor.py @@ -12,9 +12,10 @@ from durabletask import task, worker logging.basicConfig( - format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - level=logging.DEBUG) + format="%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + level=logging.DEBUG, +) TEST_LOGGER = logging.getLogger("tests") TEST_INSTANCE_ID = "abc123" @@ -34,7 +35,9 @@ def orchestrator(ctx: task.OrchestrationContext, my_input: int): start_time = datetime.now() new_events = [ helpers.new_orchestrator_started_event(start_time), - helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=json.dumps(test_input)), + helpers.new_execution_started_event( + name, TEST_INSTANCE_ID, encoded_input=json.dumps(test_input) + ), ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, [], 
new_events) @@ -99,7 +102,8 @@ def delay_orchestrator(ctx: task.OrchestrationContext, _): new_events = [ helpers.new_orchestrator_started_event(start_time), - helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None)] + helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, [], new_events) actions = result.actions @@ -129,9 +133,9 @@ def delay_orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(start_time), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_timer_created_event(1, expected_fire_at)] - new_events = [ - helpers.new_timer_fired_event(1, expected_fire_at)] + helpers.new_timer_created_event(1, expected_fire_at), + ] + new_events = [helpers.new_timer_fired_event(1, expected_fire_at)] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) @@ -145,6 +149,7 @@ def delay_orchestrator(ctx: task.OrchestrationContext, _): def test_schedule_activity_actions(): """Test the actions output for the call_activity orchestrator method""" + def dummy_activity(ctx, _): pass @@ -158,7 +163,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): encoded_input = json.dumps(42) new_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input)] + helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, [], new_events) actions = result.actions @@ -173,6 +179,7 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): def test_schedule_activity_actions_router_without_app_id(): """Tests that scheduleTask action contains 
correct router fields when app_id is specified""" + def dummy_activity(ctx, _): pass @@ -198,13 +205,14 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert len(actions) == 1 action = actions[0] assert action.router.sourceAppID == "source-app" - assert action.router.targetAppID == '' + assert action.router.targetAppID == "" assert action.scheduleTask.router.sourceAppID == "source-app" - assert action.scheduleTask.router.targetAppID == '' + assert action.scheduleTask.router.targetAppID == "" def test_schedule_activity_actions_router_with_app_id(): """Tests that scheduleTask action contains correct router fields when app_id is specified""" + def dummy_activity(ctx, _): pass @@ -251,7 +259,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] encoded_output = json.dumps("done!") new_events = [helpers.new_task_completed_event(1, encoded_output)] @@ -267,6 +276,7 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): def test_activity_task_failed(): """Tests the failure of an activity task""" + def dummy_activity(ctx, _): pass @@ -280,7 +290,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] ex = Exception("Kah-BOOOOM!!!") new_events = [helpers.new_task_failed_event(1, ex)] @@ -291,7 +302,9 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): complete_action = get_and_validate_single_complete_orchestration_action(actions) 
assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'TaskFailedError' # TODO: Should this be the specific error? + assert ( + complete_action.failureDetails.errorType == "TaskFailedError" + ) # TODO: Should this be the specific error? assert str(ex) in complete_action.failureDetails.errorMessage # Make sure the line of code where the exception was raised is included in the stack trace @@ -313,8 +326,10 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): max_number_of_attempts=6, backoff_coefficient=2, max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=50)), - input=orchestrator_input) + retry_timeout=timedelta(seconds=50), + ), + input=orchestrator_input, + ) return result registry = worker._Registry() @@ -325,12 +340,14 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] expected_fire_at = current_timestamp + timedelta(seconds=1) new_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -344,7 +361,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(2, current_timestamp)] + helpers.new_timer_fired_event(2, current_timestamp), + ] executor = 
worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -357,7 +375,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): expected_fire_at = current_timestamp + timedelta(seconds=2) new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -371,7 +390,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(3, current_timestamp)] + helpers.new_timer_fired_event(3, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -384,7 +404,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -398,7 +419,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(4, current_timestamp)] + helpers.new_timer_fired_event(4, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, 
TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -411,7 +433,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -425,7 +448,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(5, current_timestamp)] + helpers.new_timer_fired_event(5, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -439,7 +463,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -453,7 +478,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(6, current_timestamp)] + helpers.new_timer_fired_event(6, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, 
new_events) actions = result.actions @@ -465,17 +491,21 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions assert len(actions) == 1 - assert actions[0].completeOrchestration.failureDetails.errorMessage.__contains__("Activity task #1 failed: Kah-BOOOOM!!!") + assert actions[0].completeOrchestration.failureDetails.errorMessage.__contains__( + "Activity task #1 failed: Kah-BOOOOM!!!" + ) assert actions[0].id == 7 def test_nondeterminism_expected_timer(): """Tests the non-determinism detection logic when call_timer is expected but some other method (call_activity) is called instead""" + def dummy_activity(ctx, _): pass @@ -490,7 +520,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_timer_created_event(1, fire_at)] + helpers.new_timer_created_event(1, fire_at), + ] new_events = [helpers.new_timer_fired_event(timer_id=1, fire_at=fire_at)] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -499,7 +530,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID assert "create_timer" in 
complete_action.failureDetails.errorMessage # expected method name assert "call_activity" in complete_action.failureDetails.errorMessage # actual method name @@ -507,6 +538,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_nondeterminism_expected_activity_call_no_task_id(): """Tests the non-determinism detection logic when invoking activity functions""" + def orchestrator(ctx: task.OrchestrationContext, _): result = yield task.CompletableTask() # dummy task return result @@ -517,7 +549,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, "bogus_activity")] + helpers.new_task_scheduled_event(1, "bogus_activity"), + ] new_events = [helpers.new_task_completed_event(1)] @@ -527,13 +560,14 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID assert "call_activity" in complete_action.failureDetails.errorMessage # expected method name def test_nondeterminism_expected_activity_call_wrong_task_type(): """Tests the non-determinism detection when an activity exists in the history but a non-activity is in the code""" + def dummy_activity(ctx, _): pass @@ -547,7 +581,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] new_events 
= [helpers.new_task_completed_event(1)] @@ -557,7 +592,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID assert "call_activity" in complete_action.failureDetails.errorMessage # expected method name assert "create_timer" in complete_action.failureDetails.errorMessage # unexpected method name @@ -565,6 +600,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_nondeterminism_wrong_activity_name(): """Tests the non-determinism detection when calling an activity with a name that differs from the name in the history""" + def dummy_activity(ctx, _): pass @@ -578,7 +614,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, "original_activity")] + helpers.new_task_scheduled_event(1, "original_activity"), + ] new_events = [helpers.new_task_completed_event(1)] @@ -588,15 +625,20 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID assert "call_activity" in complete_action.failureDetails.errorMessage # expected method name - assert "original_activity" in complete_action.failureDetails.errorMessage # expected activity name - assert 
"dummy_activity" in complete_action.failureDetails.errorMessage # unexpected activity name + assert ( + "original_activity" in complete_action.failureDetails.errorMessage + ) # expected activity name + assert ( + "dummy_activity" in complete_action.failureDetails.errorMessage + ) # unexpected activity name def test_sub_orchestration_task_completion(): """Tests that a sub-orchestration task is completed when the sub-orchestration completes""" + def suborchestrator(ctx: task.OrchestrationContext, _): pass @@ -610,11 +652,15 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_sub_orchestration_created_event(1, suborchestrator_name, "sub-orch-123", encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + helpers.new_sub_orchestration_created_event( + 1, suborchestrator_name, "sub-orch-123", encoded_input=None + ), + ] - new_events = [ - helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] + new_events = [helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) @@ -627,6 +673,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_create_sub_orchestration_actions_router_without_app_id(): """Tests that createSubOrchestration action contains correct router fields when app_id is specified""" + def suborchestrator(ctx: task.OrchestrationContext, _): pass @@ -637,7 +684,9 @@ def orchestrator(ctx: task.OrchestrationContext, _): registry.add_orchestrator(suborchestrator) orchestrator_name = registry.add_orchestrator(orchestrator) - exec_evt = helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None) + exec_evt = 
helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ) exec_evt.router.sourceAppID = "source-app" new_events = [ @@ -652,13 +701,14 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert len(actions) == 1 action = actions[0] assert action.router.sourceAppID == "source-app" - assert action.router.targetAppID == '' + assert action.router.targetAppID == "" assert action.createSubOrchestration.router.sourceAppID == "source-app" - assert action.createSubOrchestration.router.targetAppID == '' + assert action.createSubOrchestration.router.targetAppID == "" def test_create_sub_orchestration_actions_router_with_app_id(): """Tests that createSubOrchestration action contains correct router fields when app_id is specified""" + def suborchestrator(ctx: task.OrchestrationContext, _): pass @@ -669,7 +719,9 @@ def orchestrator(ctx: task.OrchestrationContext, _): registry.add_orchestrator(suborchestrator) orchestrator_name = registry.add_orchestrator(orchestrator) - exec_evt = helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None) + exec_evt = helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ) exec_evt.router.sourceAppID = "source-app" new_events = [ @@ -691,6 +743,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_sub_orchestration_task_failed(): """Tests that a sub-orchestration task is completed when the sub-orchestration fails""" + def suborchestrator(ctx: task.OrchestrationContext, _): pass @@ -704,8 +757,13 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_sub_orchestration_created_event(1, suborchestrator_name, "sub-orch-123", encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + 
helpers.new_sub_orchestration_created_event( + 1, suborchestrator_name, "sub-orch-123", encoded_input=None + ), + ] ex = Exception("Kah-BOOOOM!!!") new_events = [helpers.new_sub_orchestration_failed_event(1, ex)] @@ -716,7 +774,9 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'TaskFailedError' # TODO: Should this be the specific error? + assert ( + complete_action.failureDetails.errorType == "TaskFailedError" + ) # TODO: Should this be the specific error? assert str(ex) in complete_action.failureDetails.errorMessage # Make sure the line of code where the exception was raised is included in the stack trace @@ -726,6 +786,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_nondeterminism_expected_sub_orchestration_task_completion_no_task(): """Tests the non-determinism detection when a sub-orchestration action is encounteed when it shouldn't be""" + def orchestrator(ctx: task.OrchestrationContext, _): result = yield task.CompletableTask() # dummy task return result @@ -735,11 +796,15 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_sub_orchestration_created_event(1, "some_sub_orchestration", "sub-orch-123", encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + helpers.new_sub_orchestration_created_event( + 1, "some_sub_orchestration", "sub-orch-123", encoded_input=None + ), + ] - new_events = [ - helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] + new_events = [helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] executor = 
worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) @@ -747,17 +812,22 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID - assert "call_sub_orchestrator" in complete_action.failureDetails.errorMessage # expected method name + assert ( + "call_sub_orchestrator" in complete_action.failureDetails.errorMessage + ) # expected method name def test_nondeterminism_expected_sub_orchestration_task_completion_wrong_task_type(): """Tests the non-determinism detection when a sub-orchestration action is encounteed when it shouldn't be. This variation tests the case where the expected task type is wrong (e.g. 
the code schedules a timer task but the history contains a sub-orchestration completed task).""" + def orchestrator(ctx: task.OrchestrationContext, _): - result = yield ctx.create_timer(datetime.utcnow()) # created timer but history expects sub-orchestration + result = yield ctx.create_timer( + datetime.utcnow() + ) # created timer but history expects sub-orchestration return result registry = worker._Registry() @@ -765,11 +835,15 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_sub_orchestration_created_event(1, "some_sub_orchestration", "sub-orch-123", encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + helpers.new_sub_orchestration_created_event( + 1, "some_sub_orchestration", "sub-orch-123", encoded_input=None + ), + ] - new_events = [ - helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] + new_events = [helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) @@ -777,13 +851,16 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID - assert "call_sub_orchestrator" in complete_action.failureDetails.errorMessage # expected method name + assert ( + "call_sub_orchestrator" in complete_action.failureDetails.errorMessage + ) # expected method name def test_raise_event(): """Tests that an 
orchestration can wait for and process an external event sent by a client""" + def orchestrator(ctx: task.OrchestrationContext, _): result = yield ctx.wait_for_external_event("my_event") return result @@ -794,7 +871,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [] new_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID)] + helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID), + ] # Execute the orchestration until it is waiting for an external event. The result # should be an empty list of actions because the orchestration didn't schedule any work. @@ -817,6 +895,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_raise_event_buffered(): """Tests that an orchestration can receive an event that arrives earlier than expected""" + def orchestrator(ctx: task.OrchestrationContext, _): yield ctx.create_timer(ctx.current_utc_datetime + timedelta(days=1)) result = yield ctx.wait_for_external_event("my_event") @@ -829,7 +908,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): new_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID), - helpers.new_event_raised_event("my_event", encoded_input="42")] + helpers.new_event_raised_event("my_event", encoded_input="42"), + ] # Execute the orchestration. 
It should be in a running state waiting for the timer to fire executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -863,10 +943,12 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID)] + helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID), + ] new_events = [ helpers.new_suspend_event(), - helpers.new_event_raised_event("my_event", encoded_input="42")] + helpers.new_event_raised_event("my_event", encoded_input="42"), + ] # Execute the orchestration. It should remain in a running state because it was suspended prior # to processing the event raised event. @@ -898,10 +980,12 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID)] + helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID), + ] new_events = [ helpers.new_terminated_event(encoded_output=json.dumps("terminated!")), - helpers.new_event_raised_event("my_event", encoded_input="42")] + helpers.new_event_raised_event("my_event", encoded_input="42"), + ] # Execute the orchestration. 
It should be in a running state waiting for an external event executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -915,6 +999,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): @pytest.mark.parametrize("save_events", [True, False]) def test_continue_as_new(save_events: bool): """Tests the behavior of the continue-as-new API""" + def orchestrator(ctx: task.OrchestrationContext, input: int): yield ctx.create_timer(ctx.current_utc_datetime + timedelta(days=1)) ctx.continue_as_new(input + 1, save_events=save_events) @@ -928,9 +1013,9 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): helpers.new_event_raised_event("my_event", encoded_input="42"), helpers.new_event_raised_event("my_event", encoded_input="43"), helpers.new_event_raised_event("my_event", encoded_input="44"), - helpers.new_timer_created_event(1, datetime.utcnow() + timedelta(days=1))] - new_events = [ - helpers.new_timer_fired_event(1, datetime.utcnow() + timedelta(days=1))] + helpers.new_timer_created_event(1, datetime.utcnow() + timedelta(days=1)), + ] + new_events = [helpers.new_timer_fired_event(1, datetime.utcnow() + timedelta(days=1))] # Execute the orchestration. It should be in a running state waiting for the timer to fire executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -944,12 +1029,15 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): event = complete_action.carryoverEvents[i] assert type(event) is pb.HistoryEvent assert event.HasField("eventRaised") - assert event.eventRaised.name.casefold() == "my_event".casefold() # event names are case-insensitive + assert ( + event.eventRaised.name.casefold() == "my_event".casefold() + ) # event names are case-insensitive assert event.eventRaised.input.value == json.dumps(42 + i) def test_fan_out(): """Tests that a fan-out pattern correctly schedules N tasks""" + def hello(_, name: str): return f"Hello {name}!" 
@@ -967,7 +1055,10 @@ def orchestrator(ctx: task.OrchestrationContext, count: int): old_events = [] new_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input="10")] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input="10" + ), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) @@ -983,6 +1074,7 @@ def orchestrator(ctx: task.OrchestrationContext, count: int): def test_fan_in(): """Tests that a fan-in pattern works correctly""" + def print_int(_, val: int): return str(val) @@ -999,15 +1091,20 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + ] for i in range(10): - old_events.append(helpers.new_task_scheduled_event( - i + 1, activity_name, encoded_input=str(i))) + old_events.append( + helpers.new_task_scheduled_event(i + 1, activity_name, encoded_input=str(i)) + ) new_events = [] for i in range(10): - new_events.append(helpers.new_task_completed_event( - i + 1, encoded_output=print_int(None, i))) + new_events.append( + helpers.new_task_completed_event(i + 1, encoded_output=print_int(None, i)) + ) # First, test with only the first 5 events. We expect the orchestration to be running # but return zero actions since its still waiting for the other 5 tasks to complete. 
@@ -1028,6 +1125,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_fan_in_with_single_failure(): """Tests that a fan-in pattern works correctly when one of the tasks fails""" + def print_int(_, val: int): return str(val) @@ -1044,17 +1142,22 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + ] for i in range(10): - old_events.append(helpers.new_task_scheduled_event( - i + 1, activity_name, encoded_input=str(i))) + old_events.append( + helpers.new_task_scheduled_event(i + 1, activity_name, encoded_input=str(i)) + ) # 5 of the tasks complete successfully, 1 fails, and 4 are still running. # The expectation is that the orchestration will fail immediately. new_events = [] for i in range(5): - new_events.append(helpers.new_task_completed_event( - i + 1, encoded_output=print_int(None, i))) + new_events.append( + helpers.new_task_completed_event(i + 1, encoded_output=print_int(None, i)) + ) ex = Exception("Kah-BOOOOM!!!") new_events.append(helpers.new_task_failed_event(6, ex)) @@ -1065,12 +1168,15 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'TaskFailedError' # TODO: Is this the right error type? + assert ( + complete_action.failureDetails.errorType == "TaskFailedError" + ) # TODO: Is this the right error type? assert str(ex) in complete_action.failureDetails.errorMessage def test_when_any(): """Tests that a when_any pattern works correctly""" + def hello(_, name: str): return f"Hello {name}!" 
@@ -1090,20 +1196,25 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Test 1: Start the orchestration and let it yield on the when_any. We expect the orchestration # to return two actions: one to schedule the "Tokyo" task and one to schedule the "Seattle" task. old_events = [] - new_events = [helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None)] + new_events = [ + helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None) + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions assert len(actions) == 2 - assert actions[0].HasField('scheduleTask') - assert actions[1].HasField('scheduleTask') + assert actions[0].HasField("scheduleTask") + assert actions[1].HasField("scheduleTask") # The next tests assume that the orchestration has already awaited at the task.when_any() old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), helpers.new_task_scheduled_event(1, activity_name, encoded_input=json.dumps("Tokyo")), - helpers.new_task_scheduled_event(2, activity_name, encoded_input=json.dumps("Seattle"))] + helpers.new_task_scheduled_event(2, activity_name, encoded_input=json.dumps("Seattle")), + ] # Test 2: Complete the "Tokyo" task. We expect the orchestration to complete with output "Hello, Tokyo!" encoded_output = json.dumps(hello(None, "Tokyo")) @@ -1128,20 +1239,24 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_when_any_with_retry(): """Tests that a when_any pattern works correctly with retries""" + def dummy_activity(_, inp: str): if inp == "Tokyo": raise ValueError("Kah-BOOOOM!!!") return f"Hello {inp}!" 
def orchestrator(ctx: task.OrchestrationContext, _): - t1 = ctx.call_activity(dummy_activity, - retry_policy=task.RetryPolicy( - first_retry_interval=timedelta(seconds=1), - max_number_of_attempts=6, - backoff_coefficient=2, - max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=50)), - input="Tokyo") + t1 = ctx.call_activity( + dummy_activity, + retry_policy=task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=6, + backoff_coefficient=2, + max_retry_interval=timedelta(seconds=10), + retry_timeout=timedelta(seconds=50), + ), + input="Tokyo", + ) t2 = ctx.call_activity(dummy_activity, input="Seattle") winner = yield task.when_any([t1, t2]) if winner == t1: @@ -1157,14 +1272,18 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Simulate the task failing for the first time and confirm that a timer is scheduled for 1 second in the future old_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), - helpers.new_task_scheduled_event(2, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(2, task.get_name(dummy_activity)), + ] expected_fire_at = current_timestamp + timedelta(seconds=1) new_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1178,7 +1297,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = old_events + new_events new_events = [ 
helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(3, current_timestamp)] + helpers.new_timer_fired_event(3, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1191,7 +1311,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): expected_fire_at = current_timestamp + timedelta(seconds=2) new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1213,20 +1334,24 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_when_all_with_retry(): """Tests that a when_all pattern works correctly with retries""" + def dummy_activity(ctx, inp: str): if inp == "Tokyo": raise ValueError("Kah-BOOOOM!!!") return f"Hello {inp}!" 
def orchestrator(ctx: task.OrchestrationContext, _): - t1 = ctx.call_activity(dummy_activity, - retry_policy=task.RetryPolicy( - first_retry_interval=timedelta(seconds=2), - max_number_of_attempts=3, - backoff_coefficient=4, - max_retry_interval=timedelta(seconds=5), - retry_timeout=timedelta(seconds=50)), - input="Tokyo") + t1 = ctx.call_activity( + dummy_activity, + retry_policy=task.RetryPolicy( + first_retry_interval=timedelta(seconds=2), + max_number_of_attempts=3, + backoff_coefficient=4, + max_retry_interval=timedelta(seconds=5), + retry_timeout=timedelta(seconds=50), + ), + input="Tokyo", + ) t2 = ctx.call_activity(dummy_activity, input="Seattle") results = yield task.when_all([t1, t2]) return results @@ -1239,14 +1364,18 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Simulate the task failing for the first time and confirm that a timer is scheduled for 2 seconds in the future old_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), - helpers.new_task_scheduled_event(2, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(2, task.get_name(dummy_activity)), + ] expected_fire_at = current_timestamp + timedelta(seconds=2) new_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1260,7 +1389,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = old_events + new_events new_events = [ 
helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(3, current_timestamp)] + helpers.new_timer_fired_event(3, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1273,7 +1403,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): expected_fire_at = current_timestamp + timedelta(seconds=5) new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1286,8 +1417,10 @@ def orchestrator(ctx: task.OrchestrationContext, _): # And, Simulate the timer firing at the expected time and confirm that another activity task is scheduled encoded_output = json.dumps(dummy_activity(None, "Seattle")) old_events = old_events + new_events - new_events = [helpers.new_task_completed_event(2, encoded_output), - helpers.new_timer_fired_event(4, current_timestamp)] + new_events = [ + helpers.new_task_completed_event(2, encoded_output), + helpers.new_timer_fired_event(4, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1301,17 +1434,22 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = 
result.actions complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'TaskFailedError' # TODO: Should this be the specific error? + assert ( + complete_action.failureDetails.errorType == "TaskFailedError" + ) # TODO: Should this be the specific error? assert str(ex) in complete_action.failureDetails.errorMessage -def get_and_validate_single_complete_orchestration_action(actions: list[pb.OrchestratorAction]) -> pb.CompleteOrchestrationAction: +def get_and_validate_single_complete_orchestration_action( + actions: list[pb.OrchestratorAction], +) -> pb.CompleteOrchestrationAction: assert len(actions) == 1 assert type(actions[0]) is pb.OrchestratorAction assert actions[0].HasField("completeOrchestration") diff --git a/tests/durabletask/test_orchestration_wait.py b/tests/durabletask/test_orchestration_wait.py index c27345f..49eab0e 100644 --- a/tests/durabletask/test_orchestration_wait.py +++ b/tests/durabletask/test_orchestration_wait.py @@ -1,15 +1,19 @@ from unittest.mock import Mock -from durabletask.client import TaskHubGrpcClient import pytest +from durabletask.client import TaskHubGrpcClient + @pytest.mark.parametrize("timeout", [None, 0, 5]) def test_wait_for_orchestration_start_timeout(timeout): instance_id = "test-instance" - from durabletask.internal.orchestrator_service_pb2 import GetInstanceResponse, \ - OrchestrationState, ORCHESTRATION_STATUS_RUNNING + from durabletask.internal.orchestrator_service_pb2 import ( + ORCHESTRATION_STATUS_RUNNING, + GetInstanceResponse, + OrchestrationState, + ) response = GetInstanceResponse() state = OrchestrationState() @@ -28,17 +32,20 @@ def test_wait_for_orchestration_start_timeout(timeout): c._stub.WaitForInstanceStart.assert_called_once() _, kwargs = c._stub.WaitForInstanceStart.call_args if timeout is None or timeout == 0: - assert kwargs.get('timeout') is None + 
assert kwargs.get("timeout") is None else: - assert kwargs.get('timeout') == timeout + assert kwargs.get("timeout") == timeout @pytest.mark.parametrize("timeout", [None, 0, 5]) def test_wait_for_orchestration_completion_timeout(timeout): instance_id = "test-instance" - from durabletask.internal.orchestrator_service_pb2 import GetInstanceResponse, \ - OrchestrationState, ORCHESTRATION_STATUS_COMPLETED + from durabletask.internal.orchestrator_service_pb2 import ( + ORCHESTRATION_STATUS_COMPLETED, + GetInstanceResponse, + OrchestrationState, + ) response = GetInstanceResponse() state = OrchestrationState() @@ -57,6 +64,6 @@ def test_wait_for_orchestration_completion_timeout(timeout): c._stub.WaitForInstanceCompletion.assert_called_once() _, kwargs = c._stub.WaitForInstanceCompletion.call_args if timeout is None or timeout == 0: - assert kwargs.get('timeout') is None + assert kwargs.get("timeout") is None else: - assert kwargs.get('timeout') == timeout + assert kwargs.get("timeout") == timeout diff --git a/tests/durabletask/test_worker_concurrency_loop.py b/tests/durabletask/test_worker_concurrency_loop.py index de6753b..53b6c9a 100644 --- a/tests/durabletask/test_worker_concurrency_loop.py +++ b/tests/durabletask/test_worker_concurrency_loop.py @@ -10,29 +10,30 @@ def __init__(self): self.completed = [] def CompleteOrchestratorTask(self, res): - self.completed.append(('orchestrator', res)) + self.completed.append(("orchestrator", res)) def CompleteActivityTask(self, res): - self.completed.append(('activity', res)) + self.completed.append(("activity", res)) class DummyRequest: def __init__(self, kind, instance_id): self.kind = kind self.instanceId = instance_id - self.orchestrationInstance = type('O', (), {'instanceId': instance_id}) - self.name = 'dummy' + self.orchestrationInstance = type("O", (), {"instanceId": instance_id}) + self.name = "dummy" self.taskId = 1 - self.input = type('I', (), {'value': ''}) + self.input = type("I", (), {"value": ""}) self.pastEvents = 
[] self.newEvents = [] def HasField(self, field): - return (field == 'orchestratorRequest' and self.kind == 'orchestrator') or \ - (field == 'activityRequest' and self.kind == 'activity') + return (field == "orchestratorRequest" and self.kind == "orchestrator") or ( + field == "activityRequest" and self.kind == "activity" + ) def WhichOneof(self, _): - return f'{self.kind}Request' + return f"{self.kind}Request" class DummyCompletionToken: @@ -50,33 +51,40 @@ def test_worker_concurrency_loop_sync(): def dummy_orchestrator(req, stub, completionToken): time.sleep(0.1) - stub.CompleteOrchestratorTask('ok') + stub.CompleteOrchestratorTask("ok") def dummy_activity(req, stub, completionToken): time.sleep(0.1) - stub.CompleteActivityTask('ok') + stub.CompleteActivityTask("ok") # Patch the worker's _execute_orchestrator and _execute_activity worker._execute_orchestrator = dummy_orchestrator worker._execute_activity = dummy_activity - orchestrator_requests = [DummyRequest('orchestrator', f'orch{i}') for i in range(3)] - activity_requests = [DummyRequest('activity', f'act{i}') for i in range(4)] + orchestrator_requests = [DummyRequest("orchestrator", f"orch{i}") for i in range(3)] + activity_requests = [DummyRequest("activity", f"act{i}") for i in range(4)] async def run_test(): # Start the worker manager's run loop in the background worker_task = asyncio.create_task(worker._async_worker_manager.run()) for req in orchestrator_requests: - worker._async_worker_manager.submit_orchestration(dummy_orchestrator, req, stub, DummyCompletionToken()) + worker._async_worker_manager.submit_orchestration( + dummy_orchestrator, req, stub, DummyCompletionToken() + ) for req in activity_requests: - worker._async_worker_manager.submit_activity(dummy_activity, req, stub, DummyCompletionToken()) + worker._async_worker_manager.submit_activity( + dummy_activity, req, stub, DummyCompletionToken() + ) await asyncio.sleep(1.0) - orchestrator_count = sum(1 for t, _ in stub.completed if t == 
'orchestrator') - activity_count = sum(1 for t, _ in stub.completed if t == 'activity') - assert orchestrator_count == 3, f"Expected 3 orchestrator completions, got {orchestrator_count}" + orchestrator_count = sum(1 for t, _ in stub.completed if t == "orchestrator") + activity_count = sum(1 for t, _ in stub.completed if t == "activity") + assert orchestrator_count == 3, ( + f"Expected 3 orchestrator completions, got {orchestrator_count}" + ) assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" worker._async_worker_manager._shutdown = True await worker_task + asyncio.run(run_test()) @@ -116,6 +124,7 @@ def fn(*args, **kwargs): with lock: results.append((kind, idx)) return f"{kind}-{idx}-done" + return fn # Submit more work than concurrency allows diff --git a/tests/durabletask/test_worker_concurrency_loop_async.py b/tests/durabletask/test_worker_concurrency_loop_async.py index c7ba238..a88e3e3 100644 --- a/tests/durabletask/test_worker_concurrency_loop_async.py +++ b/tests/durabletask/test_worker_concurrency_loop_async.py @@ -8,29 +8,30 @@ def __init__(self): self.completed = [] def CompleteOrchestratorTask(self, res): - self.completed.append(('orchestrator', res)) + self.completed.append(("orchestrator", res)) def CompleteActivityTask(self, res): - self.completed.append(('activity', res)) + self.completed.append(("activity", res)) class DummyRequest: def __init__(self, kind, instance_id): self.kind = kind self.instanceId = instance_id - self.orchestrationInstance = type('O', (), {'instanceId': instance_id}) - self.name = 'dummy' + self.orchestrationInstance = type("O", (), {"instanceId": instance_id}) + self.name = "dummy" self.taskId = 1 - self.input = type('I', (), {'value': ''}) + self.input = type("I", (), {"value": ""}) self.pastEvents = [] self.newEvents = [] def HasField(self, field): - return (field == 'orchestratorRequest' and self.kind == 'orchestrator') or \ - (field == 'activityRequest' and self.kind == 'activity') + 
return (field == "orchestratorRequest" and self.kind == "orchestrator") or ( + field == "activityRequest" and self.kind == "activity" + ) def WhichOneof(self, _): - return f'{self.kind}Request' + return f"{self.kind}Request" class DummyCompletionToken: @@ -48,33 +49,40 @@ def test_worker_concurrency_loop_async(): async def dummy_orchestrator(req, stub, completionToken): await asyncio.sleep(0.1) - stub.CompleteOrchestratorTask('ok') + stub.CompleteOrchestratorTask("ok") async def dummy_activity(req, stub, completionToken): await asyncio.sleep(0.1) - stub.CompleteActivityTask('ok') + stub.CompleteActivityTask("ok") # Patch the worker's _execute_orchestrator and _execute_activity grpc_worker._execute_orchestrator = dummy_orchestrator grpc_worker._execute_activity = dummy_activity - orchestrator_requests = [DummyRequest('orchestrator', f'orch{i}') for i in range(3)] - activity_requests = [DummyRequest('activity', f'act{i}') for i in range(4)] + orchestrator_requests = [DummyRequest("orchestrator", f"orch{i}") for i in range(3)] + activity_requests = [DummyRequest("activity", f"act{i}") for i in range(4)] async def run_test(): # Clear stub state before each run stub.completed.clear() worker_task = asyncio.create_task(grpc_worker._async_worker_manager.run()) for req in orchestrator_requests: - grpc_worker._async_worker_manager.submit_orchestration(dummy_orchestrator, req, stub, DummyCompletionToken()) + grpc_worker._async_worker_manager.submit_orchestration( + dummy_orchestrator, req, stub, DummyCompletionToken() + ) for req in activity_requests: - grpc_worker._async_worker_manager.submit_activity(dummy_activity, req, stub, DummyCompletionToken()) + grpc_worker._async_worker_manager.submit_activity( + dummy_activity, req, stub, DummyCompletionToken() + ) await asyncio.sleep(1.0) - orchestrator_count = sum(1 for t, _ in stub.completed if t == 'orchestrator') - activity_count = sum(1 for t, _ in stub.completed if t == 'activity') - assert orchestrator_count == 3, 
f"Expected 3 orchestrator completions, got {orchestrator_count}" + orchestrator_count = sum(1 for t, _ in stub.completed if t == "orchestrator") + activity_count = sum(1 for t, _ in stub.completed if t == "activity") + assert orchestrator_count == 3, ( + f"Expected 3 orchestrator completions, got {orchestrator_count}" + ) assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" grpc_worker._async_worker_manager._shutdown = True await worker_task + asyncio.run(run_test()) asyncio.run(run_test()) From 4eb8a9fe9816f16c82145685ff8b8ac513176a48 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Fri, 31 Oct 2025 10:38:36 -0500 Subject: [PATCH 49/81] share validating grpc options Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/aio/internal/shared.py | 14 ++-- durabletask/internal/shared.py | 17 +++-- .../test_grpc_aio_channel_options.py | 72 +++++++++---------- .../durabletask/test_grpc_channel_options.py | 66 ++++++++--------- tests/durabletask/test_orchestration_e2e.py | 2 +- 5 files changed, 89 insertions(+), 82 deletions(-) diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py index edbb515..113f73b 100644 --- a/durabletask/aio/internal/shared.py +++ b/durabletask/aio/internal/shared.py @@ -1,15 +1,17 @@ # Copyright (c) The Dapr Authors. # Licensed under the MIT License. 
-from typing import Any, Optional, Sequence, Union +from typing import Any, Dict, Optional, Sequence, Union import grpc from grpc import aio as grpc_aio +from grpc.aio import ChannelArgumentType from durabletask.internal.shared import ( INSECURE_PROTOCOLS, SECURE_PROTOCOLS, get_default_host_address, + validate_grpc_options, ) ClientInterceptor = Union[ @@ -24,7 +26,7 @@ def get_grpc_aio_channel( host_address: Optional[str], secure_channel: bool = False, interceptors: Optional[Sequence[ClientInterceptor]] = None, - options: Optional[Sequence[tuple[str, Any]]] = None, + options: Optional[ChannelArgumentType] = None, ) -> grpc_aio.Channel: """create a grpc asyncio channel @@ -50,13 +52,9 @@ def get_grpc_aio_channel( break # channel interceptors/options - channel_kwargs = dict(interceptors=interceptors) + channel_kwargs: Dict[str, ChannelArgumentType | Sequence[ClientInterceptor]] = dict(interceptors=interceptors) if options is not None: - # validate all options keys prefix starts with `grpc.` - if not all(key.startswith('grpc.') for key, _ in options): - raise ValueError( - f'All options keys must start with `grpc.`. Invalid options: {options}' - ) + validate_grpc_options(options) channel_kwargs["options"] = options if secure_channel: diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 9ecd722..9c7f111 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -9,6 +9,7 @@ from typing import Any, Optional, Sequence, Union import grpc +from grpc.aio import ChannelArgumentType ClientInterceptor = Union[ grpc.UnaryUnaryClientInterceptor, @@ -50,6 +51,17 @@ def get_default_host_address() -> str: return "localhost:4001" +def validate_grpc_options(options: ChannelArgumentType): + """Validate that all gRPC options are valid. Mainly checking keys. Values can be string, int, float, bool and pointer""" + for key, value in options: + if not isinstance(key, str): + raise ValueError(f"gRPC option key must be a string. 
Invalid key: {key}") + if not all(key.startswith("grpc.") for key, _ in options): + raise ValueError( + f"All options keys must start with `grpc.`. Invalid options: {options}" + ) + + def get_grpc_channel( host_address: Optional[str], secure_channel: bool = False, @@ -84,10 +96,7 @@ def get_grpc_channel( # Create the base channel if options is not None: # validate all options keys prefix starts with `grpc.` - if not all(key.startswith('grpc.') for key, _ in options): - raise ValueError( - f'All options keys must start with `grpc.`. Invalid options: {options}' - ) + validate_grpc_options(options) if secure_channel: channel = grpc.secure_channel( host_address, grpc.ssl_channel_credentials(), options=options diff --git a/tests/durabletask/test_grpc_aio_channel_options.py b/tests/durabletask/test_grpc_aio_channel_options.py index 54830c8..2f64577 100644 --- a/tests/durabletask/test_grpc_aio_channel_options.py +++ b/tests/durabletask/test_grpc_aio_channel_options.py @@ -5,34 +5,34 @@ from durabletask.aio.internal.shared import get_grpc_aio_channel -HOST_ADDRESS = 'localhost:50051' +HOST_ADDRESS = "localhost:50051" def _find_option(options, key): for k, v in options: if k == key: return v - raise AssertionError(f'Option with key {key} not found in options: {options}') + raise AssertionError(f"Option with key {key} not found in options: {options}") def test_aio_channel_passes_base_options_and_max_lengths(): base_options = [ - ('grpc.max_send_message_length', 4321), - ('grpc.max_receive_message_length', 8765), - ('grpc.primary_user_agent', 'durabletask-aio-tests'), + ("grpc.max_send_message_length", 4321), + ("grpc.max_receive_message_length", 8765), + ("grpc.primary_user_agent", "durabletask-aio-tests"), ] - with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: get_grpc_aio_channel(HOST_ADDRESS, False, options=base_options) # Ensure called with 
options kwarg assert mock_channel.call_count == 1 args, kwargs = mock_channel.call_args assert args[0] == HOST_ADDRESS - assert 'options' in kwargs - opts = kwargs['options'] + assert "options" in kwargs + opts = kwargs["options"] # Check our base options made it through - assert ('grpc.max_send_message_length', 4321) in opts - assert ('grpc.max_receive_message_length', 8765) in opts - assert ('grpc.primary_user_agent', 'durabletask-aio-tests') in opts + assert ("grpc.max_send_message_length", 4321) in opts + assert ("grpc.max_receive_message_length", 8765) in opts + assert ("grpc.primary_user_agent", "durabletask-aio-tests") in opts def test_aio_channel_merges_env_keepalive_and_retry(monkeypatch: pytest.MonkeyPatch): @@ -42,53 +42,53 @@ def test_aio_channel_merges_env_keepalive_and_retry(monkeypatch: pytest.MonkeyPa initial_backoff_ms = 250 max_backoff_ms = 2000 backoff_multiplier = 1.5 - codes = ['RESOURCE_EXHAUSTED'] + codes = ["RESOURCE_EXHAUSTED"] service_config = { - 'methodConfig': [ + "methodConfig": [ { - 'name': [{'service': ''}], # match all services/methods - 'retryPolicy': { - 'maxAttempts': max_attempts, - 'initialBackoff': f'{initial_backoff_ms / 1000.0}s', - 'maxBackoff': f'{max_backoff_ms / 1000.0}s', - 'backoffMultiplier': backoff_multiplier, - 'retryableStatusCodes': codes, + "name": [{"service": ""}], # match all services/methods + "retryPolicy": { + "maxAttempts": max_attempts, + "initialBackoff": f"{initial_backoff_ms / 1000.0}s", + "maxBackoff": f"{max_backoff_ms / 1000.0}s", + "backoffMultiplier": backoff_multiplier, + "retryableStatusCodes": codes, }, } ] } - base_options = [('grpc.service_config', json.dumps(service_config))] + base_options = [("grpc.service_config", json.dumps(service_config))] - with patch('durabletask.aio.internal.shared.grpc_aio.insecure_channel') as mock_channel: + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: get_grpc_aio_channel(HOST_ADDRESS, False, options=base_options) 
args, kwargs = mock_channel.call_args assert args[0] == HOST_ADDRESS - assert 'options' in kwargs - opts = kwargs['options'] + assert "options" in kwargs + opts = kwargs["options"] # Retry service config present and parses correctly - svc_cfg_str = _find_option(opts, 'grpc.service_config') + svc_cfg_str = _find_option(opts, "grpc.service_config") svc_cfg = json.loads(svc_cfg_str) - assert 'methodConfig' in svc_cfg and isinstance(svc_cfg['methodConfig'], list) - retry_policy = svc_cfg['methodConfig'][0]['retryPolicy'] - assert retry_policy['maxAttempts'] == 4 - assert retry_policy['initialBackoff'] == f'{250 / 1000.0}s' - assert retry_policy['maxBackoff'] == f'{2000 / 1000.0}s' - assert retry_policy['backoffMultiplier'] == 1.5 + assert "methodConfig" in svc_cfg and isinstance(svc_cfg["methodConfig"], list) + retry_policy = svc_cfg["methodConfig"][0]["retryPolicy"] + assert retry_policy["maxAttempts"] == 4 + assert retry_policy["initialBackoff"] == f"{250 / 1000.0}s" + assert retry_policy["maxBackoff"] == f"{2000 / 1000.0}s" + assert retry_policy["backoffMultiplier"] == 1.5 # Codes are upper-cased list - assert 'RESOURCE_EXHAUSTED' in retry_policy['retryableStatusCodes'] + assert "RESOURCE_EXHAUSTED" in retry_policy["retryableStatusCodes"] def test_aio_secure_channel_receives_options_when_secure_true(): - base_options = [('grpc.max_receive_message_length', 999999)] + base_options = [("grpc.max_receive_message_length", 999999)] with ( - patch('durabletask.aio.internal.shared.grpc_aio.secure_channel') as mock_channel, - patch('grpc.ssl_channel_credentials') as mock_credentials, + patch("durabletask.aio.internal.shared.grpc_aio.secure_channel") as mock_channel, + patch("grpc.ssl_channel_credentials") as mock_credentials, ): get_grpc_aio_channel(HOST_ADDRESS, True, options=base_options) args, kwargs = mock_channel.call_args assert args[0] == HOST_ADDRESS assert args[1] == mock_credentials.return_value - assert ('grpc.max_receive_message_length', 999999) in 
kwargs.get('options', []) + assert ("grpc.max_receive_message_length", 999999) in kwargs.get("options", []) diff --git a/tests/durabletask/test_grpc_channel_options.py b/tests/durabletask/test_grpc_channel_options.py index b8ac533..841d75b 100644 --- a/tests/durabletask/test_grpc_channel_options.py +++ b/tests/durabletask/test_grpc_channel_options.py @@ -1,38 +1,38 @@ import json -from unittest.mock import ANY, patch +from unittest.mock import patch import pytest from durabletask.internal.shared import get_grpc_channel -HOST_ADDRESS = 'localhost:50051' +HOST_ADDRESS = "localhost:50051" def _find_option(options, key): for k, v in options: if k == key: return v - raise AssertionError(f'Option with key {key} not found in options: {options}') + raise AssertionError(f"Option with key {key} not found in options: {options}") def test_sync_channel_passes_base_options_and_max_lengths(): base_options = [ - ('grpc.max_send_message_length', 1234), - ('grpc.max_receive_message_length', 5678), - ('grpc.primary_user_agent', 'durabletask-tests'), + ("grpc.max_send_message_length", 1234), + ("grpc.max_receive_message_length", 5678), + ("grpc.primary_user_agent", "durabletask-tests"), ] - with patch('grpc.insecure_channel') as mock_channel: + with patch("grpc.insecure_channel") as mock_channel: get_grpc_channel(HOST_ADDRESS, False, options=base_options) # Ensure called with options kwarg assert mock_channel.call_count == 1 args, kwargs = mock_channel.call_args assert args[0] == HOST_ADDRESS - assert 'options' in kwargs - opts = kwargs['options'] + assert "options" in kwargs + opts = kwargs["options"] # Check our base options made it through - assert ('grpc.max_send_message_length', 1234) in opts - assert ('grpc.max_receive_message_length', 5678) in opts - assert ('grpc.primary_user_agent', 'durabletask-tests') in opts + assert ("grpc.max_send_message_length", 1234) in opts + assert ("grpc.max_receive_message_length", 5678) in opts + assert ("grpc.primary_user_agent", 
"durabletask-tests") in opts def test_sync_channel_merges_env_keepalive_and_retry(monkeypatch: pytest.MonkeyPatch): @@ -42,40 +42,40 @@ def test_sync_channel_merges_env_keepalive_and_retry(monkeypatch: pytest.MonkeyP initial_backoff_ms = 250 max_backoff_ms = 2000 backoff_multiplier = 1.5 - codes = ['ABORTED'] + codes = ["ABORTED"] service_config = { - 'methodConfig': [ + "methodConfig": [ { - 'name': [{'service': ''}], # match all services/methods - 'retryPolicy': { - 'maxAttempts': max_attempts, - 'initialBackoff': f'{initial_backoff_ms / 1000.0}s', - 'maxBackoff': f'{max_backoff_ms / 1000.0}s', - 'backoffMultiplier': backoff_multiplier, - 'retryableStatusCodes': codes, + "name": [{"service": ""}], # match all services/methods + "retryPolicy": { + "maxAttempts": max_attempts, + "initialBackoff": f"{initial_backoff_ms / 1000.0}s", + "maxBackoff": f"{max_backoff_ms / 1000.0}s", + "backoffMultiplier": backoff_multiplier, + "retryableStatusCodes": codes, }, } ] } - base_options = [('grpc.service_config', json.dumps(service_config))] + base_options = [("grpc.service_config", json.dumps(service_config))] - with patch('grpc.insecure_channel') as mock_channel: + with patch("grpc.insecure_channel") as mock_channel: get_grpc_channel(HOST_ADDRESS, False, options=base_options) args, kwargs = mock_channel.call_args assert args[0] == HOST_ADDRESS - assert 'options' in kwargs - opts = kwargs['options'] + assert "options" in kwargs + opts = kwargs["options"] # Retry service config present and parses correctly - svc_cfg_str = _find_option(opts, 'grpc.service_config') + svc_cfg_str = _find_option(opts, "grpc.service_config") svc_cfg = json.loads(svc_cfg_str) - assert 'methodConfig' in svc_cfg and isinstance(svc_cfg['methodConfig'], list) - retry_policy = svc_cfg['methodConfig'][0]['retryPolicy'] - assert retry_policy['maxAttempts'] == 4 - assert retry_policy['initialBackoff'] == f'{250 / 1000.0}s' - assert retry_policy['maxBackoff'] == f'{2000 / 1000.0}s' - assert 
retry_policy['backoffMultiplier'] == 1.5 + assert "methodConfig" in svc_cfg and isinstance(svc_cfg["methodConfig"], list) + retry_policy = svc_cfg["methodConfig"][0]["retryPolicy"] + assert retry_policy["maxAttempts"] == 4 + assert retry_policy["initialBackoff"] == f"{250 / 1000.0}s" + assert retry_policy["maxBackoff"] == f"{2000 / 1000.0}s" + assert retry_policy["backoffMultiplier"] == 1.5 # Codes are upper-cased list - assert 'ABORTED' in retry_policy['retryableStatusCodes'] + assert "ABORTED" in retry_policy["retryableStatusCodes"] diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index 634dfd9..b60c035 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -30,7 +30,7 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): # set a custom max send length option c = client.TaskHubGrpcClient( channel_options=[ - ('grpc.max_send_message_length', 1024 * 1024), # 1MB + ("grpc.max_send_message_length", 1024 * 1024), # 1MB ] ) id = c.schedule_new_orchestration(empty_orchestrator) From 02ef910198e93998a9fe0c425cb388f25276342a Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Fri, 31 Oct 2025 10:40:03 -0500 Subject: [PATCH 50/81] ruff Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/aio/internal/shared.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py index 113f73b..d69ba9d 100644 --- a/durabletask/aio/internal/shared.py +++ b/durabletask/aio/internal/shared.py @@ -1,7 +1,7 @@ # Copyright (c) The Dapr Authors. # Licensed under the MIT License. 
-from typing import Any, Dict, Optional, Sequence, Union +from typing import Dict, Optional, Sequence, Union import grpc from grpc import aio as grpc_aio @@ -52,7 +52,9 @@ def get_grpc_aio_channel( break # channel interceptors/options - channel_kwargs: Dict[str, ChannelArgumentType | Sequence[ClientInterceptor]] = dict(interceptors=interceptors) + channel_kwargs: Dict[str, ChannelArgumentType | Sequence[ClientInterceptor]] = dict( + interceptors=interceptors + ) if options is not None: validate_grpc_options(options) channel_kwargs["options"] = options From 76444c822f03ce41ce97b1578620a5e0c5cca291 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Mon, 3 Nov 2025 12:28:58 -0600 Subject: [PATCH 51/81] tackle feedback Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/aio/internal/shared.py | 13 ++--- durabletask/internal/shared.py | 14 ++--- tests/durabletask/test_client.py | 59 +++++++++++++++----- tests/durabletask/test_client_async.py | 77 +++++++++++++++++++------- 4 files changed, 111 insertions(+), 52 deletions(-) diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py index d69ba9d..3825fe6 100644 --- a/durabletask/aio/internal/shared.py +++ b/durabletask/aio/internal/shared.py @@ -1,7 +1,7 @@ # Copyright (c) The Dapr Authors. # Licensed under the MIT License. 
-from typing import Dict, Optional, Sequence, Union +from typing import Optional, Sequence, Union import grpc from grpc import aio as grpc_aio @@ -51,19 +51,16 @@ def get_grpc_aio_channel( host_address = host_address[len(protocol) :] break - # channel interceptors/options - channel_kwargs: Dict[str, ChannelArgumentType | Sequence[ClientInterceptor]] = dict( - interceptors=interceptors - ) if options is not None: validate_grpc_options(options) - channel_kwargs["options"] = options if secure_channel: channel = grpc_aio.secure_channel( - host_address, grpc.ssl_channel_credentials(), **channel_kwargs + host_address, grpc.ssl_channel_credentials(), interceptors=interceptors, options=options ) else: - channel = grpc_aio.insecure_channel(host_address, **channel_kwargs) + channel = grpc_aio.insecure_channel( + host_address, interceptors=interceptors, options=options + ) return channel diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 9c7f111..d971f1d 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -97,17 +97,11 @@ def get_grpc_channel( if options is not None: # validate all options keys prefix starts with `grpc.` validate_grpc_options(options) - if secure_channel: - channel = grpc.secure_channel( - host_address, grpc.ssl_channel_credentials(), options=options - ) - else: - channel = grpc.insecure_channel(host_address, options=options) + + if secure_channel: + channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials(), options=options) else: - if secure_channel: - channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) - else: - channel = grpc.insecure_channel(host_address) + channel = grpc.insecure_channel(host_address, options=options) # Apply interceptors ONLY if they exist if interceptors: diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py index d55e0e0..7f61c2f 100644 --- a/tests/durabletask/test_client.py +++ 
b/tests/durabletask/test_client.py @@ -1,4 +1,4 @@ -from unittest.mock import ANY, patch +from unittest.mock import patch from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl from durabletask.internal.shared import get_default_host_address, get_grpc_channel @@ -11,7 +11,9 @@ def test_get_grpc_channel_insecure(): with patch("grpc.insecure_channel") as mock_channel: get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) - mock_channel.assert_called_once_with(HOST_ADDRESS) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "options" in kwargs and kwargs["options"] is None def test_get_grpc_channel_secure(): @@ -20,13 +22,18 @@ def test_get_grpc_channel_secure(): patch("grpc.ssl_channel_credentials") as mock_credentials, ): get_grpc_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS) - mock_channel.assert_called_once_with(HOST_ADDRESS, mock_credentials.return_value) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert args[1] == mock_credentials.return_value + assert "options" in kwargs and kwargs["options"] is None def test_get_grpc_channel_default_host_address(): with patch("grpc.insecure_channel") as mock_channel: get_grpc_channel(None, False, interceptors=INTERCEPTORS) - mock_channel.assert_called_once_with(get_default_host_address()) + args, kwargs = mock_channel.call_args + assert args[0] == get_default_host_address() + assert "options" in kwargs and kwargs["options"] is None def test_get_grpc_channel_with_metadata(): @@ -35,7 +42,9 @@ def test_get_grpc_channel_with_metadata(): patch("grpc.intercept_channel") as mock_intercept_channel, ): get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) - mock_channel.assert_called_once_with(HOST_ADDRESS) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "options" in kwargs and kwargs["options"] is None mock_intercept_channel.assert_called_once() # Capture and check the arguments 
passed to intercept_channel() @@ -54,40 +63,60 @@ def test_grpc_channel_with_host_name_protocol_stripping(): prefix = "grpc://" get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) - mock_insecure_channel.assert_called_with(host_name) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None prefix = "http://" get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) - mock_insecure_channel.assert_called_with(host_name) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None prefix = "HTTP://" get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) - mock_insecure_channel.assert_called_with(host_name) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None prefix = "GRPC://" get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) - mock_insecure_channel.assert_called_with(host_name) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None prefix = "" get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) - mock_insecure_channel.assert_called_with(host_name) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None prefix = "grpcs://" get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) - mock_secure_channel.assert_called_with(host_name, ANY) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None prefix = "https://" get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) - mock_secure_channel.assert_called_with(host_name, ANY) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in 
kwargs and kwargs["options"] is None prefix = "HTTPS://" get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) - mock_secure_channel.assert_called_with(host_name, ANY) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None prefix = "GRPCS://" get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) - mock_secure_channel.assert_called_with(host_name, ANY) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None prefix = "" get_grpc_channel(prefix + host_name, True, interceptors=INTERCEPTORS) - mock_secure_channel.assert_called_with(host_name, ANY) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None diff --git a/tests/durabletask/test_client_async.py b/tests/durabletask/test_client_async.py index 0588ff1..9b6dfc3 100644 --- a/tests/durabletask/test_client_async.py +++ b/tests/durabletask/test_client_async.py @@ -1,7 +1,7 @@ # Copyright (c) The Dapr Authors. # Licensed under the MIT License. 
-from unittest.mock import ANY, patch +from unittest.mock import patch from durabletask.aio.client import AsyncTaskHubGrpcClient from durabletask.aio.internal.grpc_interceptor import DefaultClientInterceptorImpl @@ -16,7 +16,10 @@ def test_get_grpc_aio_channel_insecure(): with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: get_grpc_aio_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS_AIO) - mock_channel.assert_called_once_with(HOST_ADDRESS, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None def test_get_grpc_aio_channel_secure(): @@ -25,23 +28,29 @@ def test_get_grpc_aio_channel_secure(): patch("grpc.ssl_channel_credentials") as mock_credentials, ): get_grpc_aio_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS_AIO) - mock_channel.assert_called_once_with( - HOST_ADDRESS, mock_credentials.return_value, interceptors=INTERCEPTORS_AIO - ) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert args[1] == mock_credentials.return_value + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None def test_get_grpc_aio_channel_default_host_address(): with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: get_grpc_aio_channel(None, False, interceptors=INTERCEPTORS_AIO) - mock_channel.assert_called_once_with( - get_default_host_address(), interceptors=INTERCEPTORS_AIO - ) + args, kwargs = mock_channel.call_args + assert args[0] == get_default_host_address() + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None def test_get_grpc_aio_channel_with_interceptors(): with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: get_grpc_aio_channel(HOST_ADDRESS, False, 
interceptors=INTERCEPTORS_AIO) - mock_channel.assert_called_once_with(HOST_ADDRESS, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None # Capture and check the arguments passed to insecure_channel() args, kwargs = mock_channel.call_args @@ -61,43 +70,73 @@ def test_grpc_aio_channel_with_host_name_protocol_stripping(): prefix = "grpc://" get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) - mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None prefix = "http://" get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) - mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None prefix = "HTTP://" get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) - mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None prefix = "GRPC://" get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) - mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None prefix = "" 
get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) - mock_insecure_channel.assert_called_with(host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None prefix = "grpcs://" get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) - mock_secure_channel.assert_called_with(host_name, ANY, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None prefix = "https://" get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) - mock_secure_channel.assert_called_with(host_name, ANY, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None prefix = "HTTPS://" get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) - mock_secure_channel.assert_called_with(host_name, ANY, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None prefix = "GRPCS://" get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) - mock_secure_channel.assert_called_with(host_name, ANY, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None prefix = "" get_grpc_aio_channel(prefix + host_name, True, interceptors=INTERCEPTORS_AIO) - mock_secure_channel.assert_called_with(host_name, ANY, 
interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None def test_async_client_construct_with_metadata(): From f3442db022d1b729334a9f9f8029175629861512 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Mon, 3 Nov 2025 14:45:21 -0600 Subject: [PATCH 52/81] add missing grpc option in worker grpc client Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/worker.py | 4 +++- tests/durabletask/test_orchestration_e2e.py | 13 ++++++++----- .../test_orchestration_e2e_async.py | 19 ++++++++++++++----- 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/durabletask/worker.py b/durabletask/worker.py index 2d057e1..b15ee98 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -223,6 +223,7 @@ def __init__( secure_channel: bool = False, interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, concurrency_options: Optional[ConcurrencyOptions] = None, + channel_options: Optional[Sequence[tuple[str, Any]]] = None, ): self._registry = _Registry() self._host_address = host_address if host_address else shared.get_default_host_address() @@ -230,6 +231,7 @@ def __init__( self._shutdown = Event() self._is_running = False self._secure_channel = secure_channel + self._channel_options = channel_options # Use provided concurrency options or create default ones self._concurrency_options = ( @@ -306,7 +308,7 @@ def create_fresh_connection(): current_stub = None try: current_channel = shared.get_grpc_channel( - self._host_address, self._secure_channel, self._interceptors + self._host_address, self._secure_channel, self._interceptors, options=self._channel_options ) current_stub = stubs.TaskHubSidecarServiceStub(current_channel) current_stub.Hello(empty_pb2.Empty()) diff --git 
a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index b60c035..08de87b 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -11,7 +11,8 @@ from durabletask import client, task, worker # NOTE: These tests assume a sidecar process is running. Example command: -# docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator +# dapr init || true +# dapr run --app-id test-app --dapr-grpc-port 4001 pytestmark = pytest.mark.e2e @@ -22,16 +23,18 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): nonlocal invoked # don't do this in a real app! invoked = True + channel_options = [ + ("grpc.max_send_message_length", 1024 * 1024), # 1MB + ] + # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(channel_options=channel_options) as w: w.add_orchestrator(empty_orchestrator) w.start() # set a custom max send length option c = client.TaskHubGrpcClient( - channel_options=[ - ("grpc.max_send_message_length", 1024 * 1024), # 1MB - ] + channel_options=channel_options ) id = c.schedule_new_orchestration(empty_orchestrator) state = c.wait_for_orchestration_completion(id, timeout=30) diff --git a/tests/durabletask/test_orchestration_e2e_async.py b/tests/durabletask/test_orchestration_e2e_async.py index 2e34603..78b7937 100644 --- a/tests/durabletask/test_orchestration_e2e_async.py +++ b/tests/durabletask/test_orchestration_e2e_async.py @@ -13,7 +13,7 @@ from durabletask.client import OrchestrationStatus # NOTE: These tests assume a sidecar process is running. 
Example command: -# go install github.com/microsoft/durabletask-go@main +# go install github.com/dapr/durabletask-go@main # durabletask-go --port 4001 pytestmark = [pytest.mark.e2e, pytest.mark.asyncio] @@ -25,12 +25,16 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): nonlocal invoked # don't do this in a real app! invoked = True + channel_options = [ + ("grpc.max_send_message_length", 1024 * 1024), # 1MB + ] + # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(channel_options=channel_options) as w: w.add_orchestrator(empty_orchestrator) w.start() - c = AsyncTaskHubGrpcClient() + c = AsyncTaskHubGrpcClient(channel_options=channel_options) id = await c.schedule_new_orchestration(empty_orchestrator) state = await c.wait_for_orchestration_completion(id, timeout=30) await c.aclose() @@ -58,13 +62,18 @@ def sequence(ctx: task.OrchestrationContext, start_val: int): numbers.append(current) return numbers + channel_options =[ + ("grpc.max_send_message_length", 1024 * 1024), # 1MB + ] # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker( + channel_options=channel_options + ) as w: w.add_orchestrator(sequence) w.add_activity(plus_one) w.start() - client = AsyncTaskHubGrpcClient() + client = AsyncTaskHubGrpcClient(channel_options=channel_options) id = await client.schedule_new_orchestration(sequence, input=1) state = await client.wait_for_orchestration_completion(id, timeout=30) await client.aclose() From f150b5a3029e7b3c805f42c86a92ddae61622cb1 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Tue, 4 Nov 2025 06:59:10 -0600 Subject: [PATCH 53/81] remove validate grpc key prefix Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/aio/internal/shared.py | 4 ---- durabletask/worker.py | 5 ++++- 
tests/durabletask/test_orchestration_e2e.py | 4 +--- tests/durabletask/test_orchestration_e2e_async.py | 10 ++++------ 4 files changed, 9 insertions(+), 14 deletions(-) diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py index 3825fe6..cb4ffc0 100644 --- a/durabletask/aio/internal/shared.py +++ b/durabletask/aio/internal/shared.py @@ -11,7 +11,6 @@ INSECURE_PROTOCOLS, SECURE_PROTOCOLS, get_default_host_address, - validate_grpc_options, ) ClientInterceptor = Union[ @@ -51,9 +50,6 @@ def get_grpc_aio_channel( host_address = host_address[len(protocol) :] break - if options is not None: - validate_grpc_options(options) - if secure_channel: channel = grpc_aio.secure_channel( host_address, grpc.ssl_channel_credentials(), interceptors=interceptors, options=options diff --git a/durabletask/worker.py b/durabletask/worker.py index b15ee98..daa661b 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -308,7 +308,10 @@ def create_fresh_connection(): current_stub = None try: current_channel = shared.get_grpc_channel( - self._host_address, self._secure_channel, self._interceptors, options=self._channel_options + self._host_address, + self._secure_channel, + self._interceptors, + options=self._channel_options, ) current_stub = stubs.TaskHubSidecarServiceStub(current_channel) current_stub.Hello(empty_pb2.Empty()) diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index 08de87b..225456d 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -33,9 +33,7 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): w.start() # set a custom max send length option - c = client.TaskHubGrpcClient( - channel_options=channel_options - ) + c = client.TaskHubGrpcClient(channel_options=channel_options) id = c.schedule_new_orchestration(empty_orchestrator) state = c.wait_for_orchestration_completion(id, timeout=30) diff --git 
a/tests/durabletask/test_orchestration_e2e_async.py b/tests/durabletask/test_orchestration_e2e_async.py index 78b7937..c441bdc 100644 --- a/tests/durabletask/test_orchestration_e2e_async.py +++ b/tests/durabletask/test_orchestration_e2e_async.py @@ -62,13 +62,11 @@ def sequence(ctx: task.OrchestrationContext, start_val: int): numbers.append(current) return numbers - channel_options =[ - ("grpc.max_send_message_length", 1024 * 1024), # 1MB - ] + channel_options = [ + ("grpc.max_send_message_length", 1024 * 1024), # 1MB + ] # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker( - channel_options=channel_options - ) as w: + with worker.TaskHubGrpcWorker(channel_options=channel_options) as w: w.add_orchestrator(sequence) w.add_activity(plus_one) w.start() From d7910e76da18489b9a7a41d23bfebee060066e1d Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Tue, 4 Nov 2025 07:37:09 -0600 Subject: [PATCH 54/81] include not-saved file on removal of validate grpc prefix Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/internal/shared.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index d971f1d..34e5f73 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -51,17 +51,6 @@ def get_default_host_address() -> str: return "localhost:4001" -def validate_grpc_options(options: ChannelArgumentType): - """Validate that all gRPC options are valid. Mainly checking keys. Values can be string, int, float, bool and pointer""" - for key, value in options: - if not isinstance(key, str): - raise ValueError(f"gRPC option key must be a string. Invalid key: {key}") - if not all(key.startswith("grpc.") for key, _ in options): - raise ValueError( - f"All options keys must start with `grpc.`. 
Invalid options: {options}" - ) - - def get_grpc_channel( host_address: Optional[str], secure_channel: bool = False, @@ -93,11 +82,6 @@ def get_grpc_channel( host_address = host_address[len(protocol) :] break - # Create the base channel - if options is not None: - # validate all options keys prefix starts with `grpc.` - validate_grpc_options(options) - if secure_channel: channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials(), options=options) else: From 7980fd86b396b86ed36a94f5a5bdc75f635ebd10 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Tue, 4 Nov 2025 07:50:04 -0600 Subject: [PATCH 55/81] ruff/lint Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/internal/shared.py | 1 - 1 file changed, 1 deletion(-) diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 34e5f73..3adb6b1 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -9,7 +9,6 @@ from typing import Any, Optional, Sequence, Union import grpc -from grpc.aio import ChannelArgumentType ClientInterceptor = Union[ grpc.UnaryUnaryClientInterceptor, From 5cddf4fc165e5718a793fd2e4be9f559259d7f00 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Tue, 4 Nov 2025 09:19:54 -0600 Subject: [PATCH 56/81] consolidate some options tests Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- tests/durabletask/test_client.py | 20 ++++ tests/durabletask/test_client_async.py | 20 ++++ .../test_grpc_aio_channel_options.py | 94 ------------------- .../durabletask/test_grpc_channel_options.py | 81 ---------------- 4 files changed, 40 insertions(+), 175 deletions(-) delete mode 100644 tests/durabletask/test_grpc_aio_channel_options.py delete mode 100644 tests/durabletask/test_grpc_channel_options.py diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py index 7f61c2f..b671cf8 
100644 --- a/tests/durabletask/test_client.py +++ b/tests/durabletask/test_client.py @@ -120,3 +120,23 @@ def test_grpc_channel_with_host_name_protocol_stripping(): args, kwargs = mock_secure_channel.call_args assert args[0] == host_name assert "options" in kwargs and kwargs["options"] is None + + +def test_sync_channel_passes_base_options_and_max_lengths(): + base_options = [ + ("grpc.max_send_message_length", 1234), + ("grpc.max_receive_message_length", 5678), + ("grpc.primary_user_agent", "durabletask-tests"), + ] + with patch("grpc.insecure_channel") as mock_channel: + get_grpc_channel(HOST_ADDRESS, False, options=base_options) + # Ensure called with options kwarg + assert mock_channel.call_count == 1 + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "options" in kwargs + opts = kwargs["options"] + # Check our base options made it through + assert ("grpc.max_send_message_length", 1234) in opts + assert ("grpc.max_receive_message_length", 5678) in opts + assert ("grpc.primary_user_agent", "durabletask-tests") in opts diff --git a/tests/durabletask/test_client_async.py b/tests/durabletask/test_client_async.py index 9b6dfc3..43e8870 100644 --- a/tests/durabletask/test_client_async.py +++ b/tests/durabletask/test_client_async.py @@ -149,3 +149,23 @@ def test_async_client_construct_with_metadata(): interceptors = kwargs["interceptors"] assert isinstance(interceptors[0], DefaultClientInterceptorImpl) assert interceptors[0]._metadata == METADATA + + +def test_aio_channel_passes_base_options_and_max_lengths(): + base_options = [ + ("grpc.max_send_message_length", 4321), + ("grpc.max_receive_message_length", 8765), + ("grpc.primary_user_agent", "durabletask-aio-tests"), + ] + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: + get_grpc_aio_channel(HOST_ADDRESS, False, options=base_options) + # Ensure called with options kwarg + assert mock_channel.call_count == 1 + args, kwargs = 
mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "options" in kwargs + opts = kwargs["options"] + # Check our base options made it through + assert ("grpc.max_send_message_length", 4321) in opts + assert ("grpc.max_receive_message_length", 8765) in opts + assert ("grpc.primary_user_agent", "durabletask-aio-tests") in opts diff --git a/tests/durabletask/test_grpc_aio_channel_options.py b/tests/durabletask/test_grpc_aio_channel_options.py deleted file mode 100644 index 2f64577..0000000 --- a/tests/durabletask/test_grpc_aio_channel_options.py +++ /dev/null @@ -1,94 +0,0 @@ -import json -from unittest.mock import patch - -import pytest - -from durabletask.aio.internal.shared import get_grpc_aio_channel - -HOST_ADDRESS = "localhost:50051" - - -def _find_option(options, key): - for k, v in options: - if k == key: - return v - raise AssertionError(f"Option with key {key} not found in options: {options}") - - -def test_aio_channel_passes_base_options_and_max_lengths(): - base_options = [ - ("grpc.max_send_message_length", 4321), - ("grpc.max_receive_message_length", 8765), - ("grpc.primary_user_agent", "durabletask-aio-tests"), - ] - with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: - get_grpc_aio_channel(HOST_ADDRESS, False, options=base_options) - # Ensure called with options kwarg - assert mock_channel.call_count == 1 - args, kwargs = mock_channel.call_args - assert args[0] == HOST_ADDRESS - assert "options" in kwargs - opts = kwargs["options"] - # Check our base options made it through - assert ("grpc.max_send_message_length", 4321) in opts - assert ("grpc.max_receive_message_length", 8765) in opts - assert ("grpc.primary_user_agent", "durabletask-aio-tests") in opts - - -def test_aio_channel_merges_env_keepalive_and_retry(monkeypatch: pytest.MonkeyPatch): - # retry grpc option - # service_config ref => https://github.com/grpc/grpc-proto/blob/master/grpc/service_config/service_config.proto#L44 - max_attempts = 4 - 
initial_backoff_ms = 250 - max_backoff_ms = 2000 - backoff_multiplier = 1.5 - codes = ["RESOURCE_EXHAUSTED"] - service_config = { - "methodConfig": [ - { - "name": [{"service": ""}], # match all services/methods - "retryPolicy": { - "maxAttempts": max_attempts, - "initialBackoff": f"{initial_backoff_ms / 1000.0}s", - "maxBackoff": f"{max_backoff_ms / 1000.0}s", - "backoffMultiplier": backoff_multiplier, - "retryableStatusCodes": codes, - }, - } - ] - } - - base_options = [("grpc.service_config", json.dumps(service_config))] - - with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: - get_grpc_aio_channel(HOST_ADDRESS, False, options=base_options) - - args, kwargs = mock_channel.call_args - assert args[0] == HOST_ADDRESS - assert "options" in kwargs - opts = kwargs["options"] - - # Retry service config present and parses correctly - svc_cfg_str = _find_option(opts, "grpc.service_config") - svc_cfg = json.loads(svc_cfg_str) - assert "methodConfig" in svc_cfg and isinstance(svc_cfg["methodConfig"], list) - retry_policy = svc_cfg["methodConfig"][0]["retryPolicy"] - assert retry_policy["maxAttempts"] == 4 - assert retry_policy["initialBackoff"] == f"{250 / 1000.0}s" - assert retry_policy["maxBackoff"] == f"{2000 / 1000.0}s" - assert retry_policy["backoffMultiplier"] == 1.5 - # Codes are upper-cased list - assert "RESOURCE_EXHAUSTED" in retry_policy["retryableStatusCodes"] - - -def test_aio_secure_channel_receives_options_when_secure_true(): - base_options = [("grpc.max_receive_message_length", 999999)] - with ( - patch("durabletask.aio.internal.shared.grpc_aio.secure_channel") as mock_channel, - patch("grpc.ssl_channel_credentials") as mock_credentials, - ): - get_grpc_aio_channel(HOST_ADDRESS, True, options=base_options) - args, kwargs = mock_channel.call_args - assert args[0] == HOST_ADDRESS - assert args[1] == mock_credentials.return_value - assert ("grpc.max_receive_message_length", 999999) in kwargs.get("options", []) diff --git 
a/tests/durabletask/test_grpc_channel_options.py b/tests/durabletask/test_grpc_channel_options.py deleted file mode 100644 index 841d75b..0000000 --- a/tests/durabletask/test_grpc_channel_options.py +++ /dev/null @@ -1,81 +0,0 @@ -import json -from unittest.mock import patch - -import pytest - -from durabletask.internal.shared import get_grpc_channel - -HOST_ADDRESS = "localhost:50051" - - -def _find_option(options, key): - for k, v in options: - if k == key: - return v - raise AssertionError(f"Option with key {key} not found in options: {options}") - - -def test_sync_channel_passes_base_options_and_max_lengths(): - base_options = [ - ("grpc.max_send_message_length", 1234), - ("grpc.max_receive_message_length", 5678), - ("grpc.primary_user_agent", "durabletask-tests"), - ] - with patch("grpc.insecure_channel") as mock_channel: - get_grpc_channel(HOST_ADDRESS, False, options=base_options) - # Ensure called with options kwarg - assert mock_channel.call_count == 1 - args, kwargs = mock_channel.call_args - assert args[0] == HOST_ADDRESS - assert "options" in kwargs - opts = kwargs["options"] - # Check our base options made it through - assert ("grpc.max_send_message_length", 1234) in opts - assert ("grpc.max_receive_message_length", 5678) in opts - assert ("grpc.primary_user_agent", "durabletask-tests") in opts - - -def test_sync_channel_merges_env_keepalive_and_retry(monkeypatch: pytest.MonkeyPatch): - # retry grpc option - # service_config ref => https://github.com/grpc/grpc-proto/blob/master/grpc/service_config/service_config.proto#L44 - max_attempts = 4 - initial_backoff_ms = 250 - max_backoff_ms = 2000 - backoff_multiplier = 1.5 - codes = ["ABORTED"] - service_config = { - "methodConfig": [ - { - "name": [{"service": ""}], # match all services/methods - "retryPolicy": { - "maxAttempts": max_attempts, - "initialBackoff": f"{initial_backoff_ms / 1000.0}s", - "maxBackoff": f"{max_backoff_ms / 1000.0}s", - "backoffMultiplier": backoff_multiplier, - 
"retryableStatusCodes": codes, - }, - } - ] - } - - base_options = [("grpc.service_config", json.dumps(service_config))] - - with patch("grpc.insecure_channel") as mock_channel: - get_grpc_channel(HOST_ADDRESS, False, options=base_options) - - args, kwargs = mock_channel.call_args - assert args[0] == HOST_ADDRESS - assert "options" in kwargs - opts = kwargs["options"] - - # Retry service config present and parses correctly - svc_cfg_str = _find_option(opts, "grpc.service_config") - svc_cfg = json.loads(svc_cfg_str) - assert "methodConfig" in svc_cfg and isinstance(svc_cfg["methodConfig"], list) - retry_policy = svc_cfg["methodConfig"][0]["retryPolicy"] - assert retry_policy["maxAttempts"] == 4 - assert retry_policy["initialBackoff"] == f"{250 / 1000.0}s" - assert retry_policy["maxBackoff"] == f"{2000 / 1000.0}s" - assert retry_policy["backoffMultiplier"] == 1.5 - # Codes are upper-cased list - assert "ABORTED" in retry_policy["retryableStatusCodes"] From 9bf49a9201ee1f875362b74cfe79054d17f05b55 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Tue, 4 Nov 2025 11:16:31 -0600 Subject: [PATCH 57/81] remove py39 Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- .github/workflows/pr-validation.yml | 2 +- pyproject.toml | 4 ++-- tox.ini | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index e412ef9..7d1aff1 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -17,7 +17,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + python-version: ["3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v4 diff --git a/pyproject.toml b/pyproject.toml index 575bc4a..6626bc2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,7 @@ classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved 
:: MIT License", ] -requires-python = ">=3.9" +requires-python = ">=3.10" license = {file = "LICENSE"} readme = "README.md" dependencies = [ @@ -62,7 +62,7 @@ dev = [ ] [tool.ruff] -target-version = "py310" # TODO: update to py310 when we drop support for py39 +target-version = "py310" line-length = 100 extend-exclude = [".github", "durabletask/internal/orchestrator_service_*.*"] diff --git a/tox.ini b/tox.ini index e035797..9b21313 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,7 @@ skipsdist = True minversion = 3.10.0 envlist = - py{39,310,311,312,313,314} + py{310,311,312,313,314} ruff, mypy, # TODO: switch runner to uv (tox-uv plugin) @@ -10,7 +10,7 @@ runner = virtualenv [testenv] # you can run tox with the e2e pytest marker using tox factors: -# tox -e py39,py310,py311,py312,py313,py314 -- e2e +# tox -e py310,py311,py312,py313,py314 -- e2e # or single one with: # tox -e py310-e2e # to use custom grpc endpoint and not capture print statements (-s arg in pytest): From 301a13748d9719e9e7ef7bc09b0a883c7935bd6c Mon Sep 17 00:00:00 2001 From: Yevgen Polyak Date: Wed, 5 Nov 2025 10:43:06 +1300 Subject: [PATCH 58/81] Make when_all and when_any tasks composable #29 Signed-off-by: evhen14 --- durabletask/task.py | 4 ++++ tests/durabletask/test_task.py | 43 ++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/durabletask/task.py b/durabletask/task.py index 2650bfd..66abc28 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -322,6 +322,8 @@ def on_child_completed(self, task: Task[T]): # The order of the result MUST match the order of the tasks provided to the constructor. 
self._result = [task.get_result() for task in self._tasks] self._is_complete = True + if self._parent is not None: + self._parent.on_child_completed(self) def get_completed_tasks(self) -> int: return self._completed_tasks @@ -423,6 +425,8 @@ def on_child_completed(self, task: Task): if not self.is_complete: self._is_complete = True self._result = task + if self._parent is not None: + self._parent.on_child_completed(self) def when_all(tasks: list[Task[T]]) -> WhenAllTask[T]: diff --git a/tests/durabletask/test_task.py b/tests/durabletask/test_task.py index 81cc8a2..9b1797f 100644 --- a/tests/durabletask/test_task.py +++ b/tests/durabletask/test_task.py @@ -46,6 +46,49 @@ def test_when_all_happy_path_returns_ordered_results_and_completes_last(): assert all_task.get_result() == ["one", "two", "three"] +def test_when_all_is_composable_with_when_any(): + c1 = task.CompletableTask() + c2 = task.CompletableTask() + + any_task = task.when_any([c1, c2]) + all_task = task.when_all([any_task]) + + assert not any_task.is_complete + assert not all_task.is_complete + + c2.complete("two") + + assert any_task.is_complete + assert all_task.is_complete + + assert all_task.is_complete + + assert all_task.get_result() == [c2] + + +def test_when_any_is_composable_with_when_all(): + c1 = task.CompletableTask() + c2 = task.CompletableTask() + c3 = task.CompletableTask() + + all_task1 = task.when_all([c1, c2]) + all_task2 = task.when_all([c3]) + any_task = task.when_any([all_task1, all_task2]) + + assert not any_task.is_complete + assert not all_task1.is_complete + assert not all_task2.is_complete + + c1.complete("one") + c2.complete("two") + + assert any_task.is_complete + assert all_task1.is_complete + assert not all_task2.is_complete + + assert any_task.get_result() == all_task1 + + def test_when_any_happy_path_returns_winner_task_and_completes_on_first(): a = task.CompletableTask() b = task.CompletableTask() From ab96d7c8cd7ee520c15a410ec7f343b2f5b9bbbd Mon Sep 17 00:00:00 2001 From: 
Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Wed, 5 Nov 2025 09:25:23 -0600 Subject: [PATCH 59/81] add py314 to CI Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- .github/workflows/pr-validation.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 7d1aff1..3ed790b 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -17,7 +17,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.10", "3.11", "3.12", "3.13"] + python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"] steps: - uses: actions/checkout@v4 @@ -43,7 +43,8 @@ jobs: # Install and run the durabletask-go sidecar for running e2e tests - name: Pytest e2e tests run: | - go install github.com/dapr/durabletask-go@main + # TODO: use dapr run instead of durabletask-go as it provides a more reliable sidecar behaviorfor e2e tests + go install github.com/dapr/durabletask-go@main durabletask-go --port 4001 & tox -e py${{ matrix.python-version }}-e2e publish: From 1a5793eaaa4b416057550992d05fa05b2638a8cb Mon Sep 17 00:00:00 2001 From: Yevgen Polyak Date: Thu, 6 Nov 2025 10:13:43 +1300 Subject: [PATCH 60/81] Update tests/durabletask/test_task.py Co-authored-by: Albert Callarisa Signed-off-by: Yevgen Polyak --- tests/durabletask/test_task.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/durabletask/test_task.py b/tests/durabletask/test_task.py index 9b1797f..45f90e3 100644 --- a/tests/durabletask/test_task.py +++ b/tests/durabletask/test_task.py @@ -60,9 +60,6 @@ def test_when_all_is_composable_with_when_any(): assert any_task.is_complete assert all_task.is_complete - - assert all_task.is_complete - assert all_task.get_result() == [c2] From fadedba4bb6e42db6310d0ba1341142ba428dd46 Mon Sep 17 00:00:00 2001 From: Yevgen Polyak Date: Thu, 6 Nov 2025 10:13:50 +1300 Subject: [PATCH 61/81] Update 
tests/durabletask/test_task.py Co-authored-by: Albert Callarisa Signed-off-by: Yevgen Polyak --- tests/durabletask/test_task.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/durabletask/test_task.py b/tests/durabletask/test_task.py index 45f90e3..f7edd0f 100644 --- a/tests/durabletask/test_task.py +++ b/tests/durabletask/test_task.py @@ -77,6 +77,11 @@ def test_when_any_is_composable_with_when_all(): assert not all_task2.is_complete c1.complete("one") + + assert not any_task.is_complete + assert not all_task1.is_complete + assert not all_task2.is_complete + c2.complete("two") assert any_task.is_complete From bd3fdb2ef60ea6a768657fadfd048ecc5335a8db Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Thu, 6 Nov 2025 07:28:50 +0100 Subject: [PATCH 62/81] Fix linter Signed-off-by: Albert Callarisa --- tests/durabletask/test_task.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/durabletask/test_task.py b/tests/durabletask/test_task.py index f7edd0f..d8ec88e 100644 --- a/tests/durabletask/test_task.py +++ b/tests/durabletask/test_task.py @@ -77,11 +77,11 @@ def test_when_any_is_composable_with_when_all(): assert not all_task2.is_complete c1.complete("one") - + assert not any_task.is_complete assert not all_task1.is_complete assert not all_task2.is_complete - + c2.complete("two") assert any_task.is_complete From c5135061bdb2bbb924012ab10354dff17164c595 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Tue, 11 Nov 2025 22:47:39 -0600 Subject: [PATCH 63/81] add non-retryable, fix retry bug, and add shutdown helpers to reduce noisy logs Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- README.md | 91 +++++- durabletask/client.py | 19 ++ durabletask/task.py | 50 +++- durabletask/worker.py | 103 ++++++- requirements.txt | 2 +- tests/durabletask/test_client.py | 16 +- tests/durabletask/test_orchestration_e2e.py | 262 ++++++++++------- 
.../test_orchestration_e2e_async.py | 38 +-- .../test_orchestration_executor.py | 265 +++++++++++++++++- tox.ini | 6 +- 10 files changed, 714 insertions(+), 138 deletions(-) diff --git a/README.md b/README.md index f6a0284..40a4e6e 100644 --- a/README.md +++ b/README.md @@ -126,10 +126,97 @@ Orchestrations can be continued as new using the `continue_as_new` API. This API Orchestrations can be suspended using the `suspend_orchestration` client API and will remain suspended until resumed using the `resume_orchestration` client API. A suspended orchestration will stop processing new events, but will continue to buffer any that happen to arrive until resumed, ensuring that no data is lost. An orchestration can also be terminated using the `terminate_orchestration` client API. Terminated orchestrations will stop processing new events and will discard any buffered events. -### Retry policies (TODO) +### Retry policies Orchestrations can specify retry policies for activities and sub-orchestrations. These policies control how many times and how frequently an activity or sub-orchestration will be retried in the event of a transient error. +#### Creating a retry policy + +```python +from datetime import timedelta +from durabletask import task + +retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), # Initial delay before first retry + max_number_of_attempts=5, # Maximum total attempts (includes first attempt) + backoff_coefficient=2.0, # Exponential backoff multiplier (must be >= 1) + max_retry_interval=timedelta(seconds=30), # Cap on retry delay + retry_timeout=timedelta(minutes=5), # Total time limit for all retries (optional) +) +``` + +**Notes:** +- `max_number_of_attempts` **includes the initial attempt**. For example, `max_number_of_attempts=5` means 1 initial attempt + up to 4 retries. +- `retry_timeout` is optional. If omitted or set to `None`, retries continue until `max_number_of_attempts` is reached. 
+- `backoff_coefficient` controls exponential backoff: delay = `first_retry_interval * (backoff_coefficient ^ retry_number)`, capped by `max_retry_interval`. +- `non_retryable_error_types` (optional) can specify additional exception types to treat as non-retryable (e.g., `[ValueError, TypeError]`). `NonRetryableError` is always non-retryable regardless of this setting. + +#### Using retry policies + +Apply retry policies to activities or sub-orchestrations: + +```python +def my_orchestrator(ctx: task.OrchestrationContext, input): + # Retry an activity + result = yield ctx.call_activity(my_activity, input=data, retry_policy=retry_policy) + + # Retry a sub-orchestration + result = yield ctx.call_sub_orchestrator(child_orchestrator, input=data, retry_policy=retry_policy) +``` + +#### Non-retryable errors + +For errors that should not be retried (e.g., validation failures, permanent errors), raise a `NonRetryableError`: + +```python +from durabletask.task import NonRetryableError + +def my_activity(ctx: task.ActivityContext, input): + if input is None: + # This error will bypass retry logic and fail immediately + raise NonRetryableError("Input cannot be None") + + # Transient errors (network, timeouts, etc.) will be retried + return call_external_service(input) +``` + +Even with a retry policy configured, `NonRetryableError` will fail immediately without retrying. + +#### Error type matching behavior + +**Important:** Error type matching uses **exact class name comparison**, not `isinstance()` checks. This is because exception objects are serialized to gRPC protobuf messages, where only the class name (as a string) survives serialization. + +**Key implications:** + +- **Not inheritance-aware**: If you specify `ValueError` in `non_retryable_error_types`, it will only match exceptions with the exact class name `"ValueError"`. A custom subclass like `CustomValueError(ValueError)` will NOT match. 
+- **Workaround**: List all exception types explicitly, including subclasses you want to handle. +- **Built-in exception**: `NonRetryableError` is always treated as non-retryable, matched by the name `"NonRetryableError"`. + +**Example:** + +```python +from datetime import timedelta +from durabletask import task + +# Custom exception hierarchy +class ValidationError(ValueError): + pass + +# This policy ONLY matches exact "ValueError" by name +retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + non_retryable_error_types=[ValueError] # Won't match ValidationError subclass! +) + +# To handle both, list them explicitly: +retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + non_retryable_error_types=[ValueError, ValidationError] # Both converted to name strings +) +``` + ## Getting Started ### Prerequisites @@ -194,7 +281,7 @@ Certain aspects like multi-app activities require the full dapr runtime to be ru ```shell dapr init || true -dapr run --app-id test-app --dapr-grpc-port 4001 --components-path ./examples/components/ +dapr run --app-id test-app --dapr-grpc-port 4001 --resources-path ./examples/components/ ``` To run the E2E tests on a specific python version (eg: 3.11), run the following command from the project root: diff --git a/durabletask/client.py b/durabletask/client.py index 1e28f30..ff4b326 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -127,9 +127,28 @@ def __init__( interceptors=interceptors, options=channel_options, ) + self._channel = channel self._stub = stubs.TaskHubSidecarServiceStub(channel) self._logger = shared.get_logger("client", log_handler, log_formatter) + def __enter__(self): + return self + + def __exit__(self, exc_type, exc, tb): + try: + self.close() + finally: + return False + + def close(self) -> None: + """Close the underlying gRPC channel.""" + try: + # grpc.Channel.close() is idempotent + 
self._channel.close() + except Exception: + # Best-effort cleanup + pass + def schedule_new_orchestration( self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], diff --git a/durabletask/task.py b/durabletask/task.py index 66abc28..c24decf 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -233,6 +233,29 @@ class OrchestrationStateError(Exception): pass +class NonRetryableError(Exception): + """Exception indicating the operation should not be retried. + + If an activity or sub-orchestration raises this exception, retry logic will be + bypassed and the failure will be returned immediately to the orchestrator. + """ + + pass + + +def is_error_non_retryable(error_type: str, policy: RetryPolicy) -> bool: + """ Checks whether an error type is non-retryable.""" + is_non_retryable = False + if error_type == "NonRetryableError": + is_non_retryable = True + elif ( + policy.non_retryable_error_types is not None + and error_type in policy.non_retryable_error_types + ): + is_non_retryable = True + return is_non_retryable + + class Task(ABC, Generic[T]): """Abstract base class for asynchronous tasks in a durable orchestration.""" @@ -397,7 +420,7 @@ def compute_next_delay(self) -> Optional[timedelta]: next_delay_f = min( next_delay_f, self._retry_policy.max_retry_interval.total_seconds() ) - return timedelta(seconds=next_delay_f) + return timedelta(seconds=next_delay_f) return None @@ -490,6 +513,7 @@ def __init__( backoff_coefficient: Optional[float] = 1.0, max_retry_interval: Optional[timedelta] = None, retry_timeout: Optional[timedelta] = None, + non_retryable_error_types: Optional[list[Union[str, type]]] = None, ): """Creates a new RetryPolicy instance. @@ -505,6 +529,11 @@ def __init__( The maximum retry interval to use for any retry attempt. retry_timeout : Optional[timedelta] The maximum amount of time to spend retrying the operation. 
+ non_retryable_error_types : Optional[list[Union[str, type]]] + A list of exception type names or classes that should not be retried. + If a failure's error type matches any of these, the task fails immediately. + The built-in NonRetryableError is always treated as non-retryable regardless + of this setting. """ # validate inputs if first_retry_interval < timedelta(seconds=0): @@ -523,6 +552,16 @@ def __init__( self._backoff_coefficient = backoff_coefficient self._max_retry_interval = max_retry_interval self._retry_timeout = retry_timeout + # Normalize non-retryable error type names to a set of strings + names: Optional[set[str]] = None + if non_retryable_error_types: + names = set[str]() + for t in non_retryable_error_types: + if isinstance(t, str) and t: + names.add(t) + elif isinstance(t, type): + names.add(t.__name__) + self._non_retryable_error_types = names @property def first_retry_interval(self) -> timedelta: @@ -549,6 +588,15 @@ def retry_timeout(self) -> Optional[timedelta]: """The maximum amount of time to spend retrying the operation.""" return self._retry_timeout + @property + def non_retryable_error_types(self) -> Optional[set[str]]: + """Set of error type names that should not be retried. + + Comparison is performed against the errorType string provided by the + backend (typically the exception class name). + """ + return self._non_retryable_error_types + def get_name(fn: Callable) -> str: """Returns the name of the provided function""" diff --git a/durabletask/worker.py b/durabletask/worker.py index daa661b..d65e669 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -21,6 +21,7 @@ import durabletask.internal.shared as shared from durabletask import task from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl +from durabletask.task import RetryPolicy TInput = TypeVar("TInput") TOutput = TypeVar("TOutput") @@ -159,6 +160,8 @@ class TaskHubGrpcWorker: interceptors to apply to the channel. Defaults to None. 
concurrency_options (Optional[ConcurrencyOptions], optional): Configuration for controlling worker concurrency limits. If None, default settings are used. + stop_timeout (float, optional): Maximum time in seconds to wait for the worker thread + to stop when calling stop(). Defaults to 30.0. Useful to set lower values in tests. Attributes: concurrency_options (ConcurrencyOptions): The current concurrency configuration. @@ -224,6 +227,7 @@ def __init__( interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, concurrency_options: Optional[ConcurrencyOptions] = None, channel_options: Optional[Sequence[tuple[str, Any]]] = None, + stop_timeout: float = 30.0, ): self._registry = _Registry() self._host_address = host_address if host_address else shared.get_default_host_address() @@ -232,6 +236,7 @@ def __init__( self._is_running = False self._secure_channel = secure_channel self._channel_options = channel_options + self._stop_timeout = stop_timeout # Use provided concurrency options or create default ones self._concurrency_options = ( @@ -249,6 +254,8 @@ def __init__( self._interceptors = None self._async_worker_manager = _AsyncWorkerManager(self._concurrency_options) + # Readiness flag set once the worker has an active stream to the sidecar + self._ready = Event() @property def concurrency_options(self) -> ConcurrencyOptions: @@ -351,6 +358,8 @@ def invalidate_connection(): pass current_channel = None current_stub = None + # No longer ready if connection is gone + self._ready.clear() def should_invalidate_connection(rpc_error): error_code = rpc_error.code() # type: ignore @@ -390,6 +399,8 @@ def should_invalidate_connection(rpc_error): self._logger.info( f"Successfully connected to {self._host_address}. Waiting for work items..." 
) + # Signal readiness once stream is established + self._ready.set() # Use a thread to read from the blocking gRPC stream and forward to asyncio import queue @@ -398,7 +409,10 @@ def should_invalidate_connection(rpc_error): def stream_reader(): try: - for work_item in self._response_stream: + stream = self._response_stream + if stream is None: + return + for work_item in stream: # type: ignore work_item_queue.put(work_item) except Exception as e: work_item_queue.put(e) @@ -433,6 +447,8 @@ def stream_reader(): pass else: self._logger.warning(f"Unexpected work item type: {request_type}") + except grpc.RpcError: + raise # let it be captured/parsed by outer except and avoid noisy log except Exception as e: self._logger.warning(f"Error in work item stream: {e}") raise e @@ -489,10 +505,18 @@ def stop(self): if self._response_stream is not None: self._response_stream.cancel() if self._runLoop is not None: - self._runLoop.join(timeout=30) + self._runLoop.join(timeout=self._stop_timeout) self._async_worker_manager.shutdown() self._logger.info("Worker shutdown completed") self._is_running = False + self._ready.clear() + + def wait_for_ready(self, timeout: Optional[float] = None) -> bool: + """Block until the worker has an active connection to the sidecar. + + Returns True if the worker became ready within the timeout; otherwise False. + """ + return self._ready.wait(timeout) def _execute_orchestrator( self, @@ -527,6 +551,25 @@ def _execute_orchestrator( try: stub.CompleteOrchestratorTask(res) + except grpc.RpcError as rpc_error: # type: ignore + # During shutdown or if the instance was terminated, the channel may be closed + # or the instance may no longer be recognized by the sidecar. Treat these as benign + # to reduce noisy logging when shutting down. 
+ code = rpc_error.code() # type: ignore + details = str(rpc_error) + benign = ( + code in {grpc.StatusCode.CANCELLED, grpc.StatusCode.UNAVAILABLE} + or "unknown instance ID/task ID combo" in details + or "Channel closed" in details + ) + if self._shutdown.is_set() or benign: + self._logger.debug( + f"Ignoring orchestrator completion delivery error during shutdown/benign condition: {rpc_error}" + ) + else: + self._logger.exception( + f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {rpc_error}" + ) except Exception as ex: self._logger.exception( f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}" @@ -558,6 +601,27 @@ def _execute_activity( try: stub.CompleteActivityTask(res) + except grpc.RpcError as rpc_error: # type: ignore + # Treat common shutdown/termination races as benign to avoid noisy logs + code = rpc_error.code() # type: ignore + details = str(rpc_error) + benign = code in { + grpc.StatusCode.CANCELLED, + grpc.StatusCode.UNAVAILABLE, + grpc.StatusCode.UNKNOWN, + } and ( + "unknown instance ID/task ID combo" in details + or "Channel closed" in details + or "Locally cancelled by application" in details + ) + if self._shutdown.is_set() or benign: + self._logger.debug( + f"Ignoring activity completion delivery error during shutdown/benign condition: {rpc_error}" + ) + else: + self._logger.exception( + f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {rpc_error}" + ) except Exception as ex: self._logger.exception( f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}" @@ -802,7 +866,8 @@ def call_activity_function_helper( id = self.next_sequence_number() router = pb.TaskRouter() - router.sourceAppID = self._app_id + if self._app_id is not None: + router.sourceAppID = self._app_id if app_id is not None: router.targetAppID = app_id @@ -1078,16 +1143,24 @@ def 
process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven if isinstance(activity_task, task.RetryableTask): if activity_task._retry_policy is not None: - next_delay = activity_task.compute_next_delay() - if next_delay is None: + # Check for non-retryable errors by type name + if task.is_error_non_retryable(event.taskFailed.failureDetails.errorType, activity_task._retry_policy): activity_task.fail( f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", event.taskFailed.failureDetails, ) ctx.resume() else: - activity_task.increment_attempt_count() - ctx.create_timer_internal(next_delay, activity_task) + next_delay = activity_task.compute_next_delay() + if next_delay is None: + activity_task.fail( + f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", + event.taskFailed.failureDetails, + ) + ctx.resume() + else: + activity_task.increment_attempt_count() + ctx.create_timer_internal(next_delay, activity_task) elif isinstance(activity_task, task.CompletableTask): activity_task.fail( f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", @@ -1145,16 +1218,24 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven return if isinstance(sub_orch_task, task.RetryableTask): if sub_orch_task._retry_policy is not None: - next_delay = sub_orch_task.compute_next_delay() - if next_delay is None: + # Check for non-retryable errors by type name + if task.is_error_non_retryable(failedEvent.failureDetails.errorType, sub_orch_task._retry_policy): sub_orch_task.fail( f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", failedEvent.failureDetails, ) ctx.resume() else: - sub_orch_task.increment_attempt_count() - ctx.create_timer_internal(next_delay, sub_orch_task) + next_delay = sub_orch_task.compute_next_delay() + if next_delay is None: + sub_orch_task.fail( + 
f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", + failedEvent.failureDetails, + ) + ctx.resume() + else: + sub_orch_task.increment_attempt_count() + ctx.create_timer_internal(next_delay, sub_orch_task) elif isinstance(sub_orch_task, task.CompletableTask): sub_orch_task.fail( f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", diff --git a/requirements.txt b/requirements.txt index 7b288f0..b6902e9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -# requirements in pyproject.toml +# pyproject.toml has the dependencies for this project \ No newline at end of file diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py index b671cf8..da72cb0 100644 --- a/tests/durabletask/test_client.py +++ b/tests/durabletask/test_client.py @@ -1,4 +1,4 @@ -from unittest.mock import patch +from unittest.mock import patch, MagicMock from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl from durabletask.internal.shared import get_default_host_address, get_grpc_channel @@ -140,3 +140,17 @@ def test_sync_channel_passes_base_options_and_max_lengths(): assert ("grpc.max_send_message_length", 1234) in opts assert ("grpc.max_receive_message_length", 5678) in opts assert ("grpc.primary_user_agent", "durabletask-tests") in opts + + +def test_taskhub_client_close_handles_exceptions(): + """Test that close() handles exceptions gracefully (edge case not easily testable in E2E).""" + with patch("durabletask.internal.shared.get_grpc_channel") as mock_get_channel: + mock_channel = MagicMock() + mock_channel.close.side_effect = Exception("close failed") + mock_get_channel.return_value = mock_channel + + from durabletask import client + + task_hub_client = client.TaskHubGrpcClient() + # Should not raise exception + task_hub_client.close() diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index 225456d..b64e446 
100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -5,6 +5,7 @@ import threading import time from datetime import timedelta +from typing import Optional import pytest @@ -16,6 +17,32 @@ pytestmark = pytest.mark.e2e +def _wait_until_terminal( + hub_client: client.TaskHubGrpcClient, + instance_id: str, + *, + timeout_s: int = 30, + fetch_payloads: bool = True, +) -> Optional[client.OrchestrationState]: + """Polling-based completion wait that does not rely on the completion stream. + + Returns the terminal state or None if timeout. + """ + deadline = time.time() + timeout_s + delay = 0.1 + while time.time() < deadline: + st = hub_client.get_orchestration_state(instance_id, fetch_payloads=fetch_payloads) + if st and st.runtime_status in ( + client.OrchestrationStatus.COMPLETED, + client.OrchestrationStatus.FAILED, + client.OrchestrationStatus.TERMINATED, + ): + return st + time.sleep(delay) + delay = min(delay * 1.5, 1.0) + return None + + def test_empty_orchestration(): invoked = False @@ -31,12 +58,18 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): with worker.TaskHubGrpcWorker(channel_options=channel_options) as w: w.add_orchestrator(empty_orchestrator) w.start() + w.wait_for_ready(timeout=10) # set a custom max send length option c = client.TaskHubGrpcClient(channel_options=channel_options) id = c.schedule_new_orchestration(empty_orchestrator) state = c.wait_for_orchestration_completion(id, timeout=30) + # Test calling wait again on already-completed orchestration (should return immediately) + state2 = c.wait_for_orchestration_completion(id, timeout=30) + assert state2 is not None + assert state2.runtime_status == client.OrchestrationStatus.COMPLETED + assert invoked assert state is not None assert state.name == task.get_name(empty_orchestrator) @@ -61,14 +94,15 @@ def sequence(ctx: task.OrchestrationContext, start_val: int): return numbers # Start a worker, which will connect to the sidecar 
in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(sequence) w.add_activity(plus_one) w.start() + w.wait_for_ready(timeout=10) - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(sequence, input=1) - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + with client.TaskHubGrpcClient() as task_hub_client: + id = task_hub_client.schedule_new_orchestration(sequence, input=1) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.name == task.get_name(sequence) @@ -104,15 +138,16 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): return error_msg # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.add_activity(throw) w.add_activity(increment_counter) w.start() + w.wait_for_ready(timeout=10) - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + with client.TaskHubGrpcClient() as task_hub_client: + id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.name == task.get_name(orchestrator) @@ -146,15 +181,16 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): yield task.when_all(tasks) # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_activity(increment) w.add_orchestrator(orchestrator_child) w.add_orchestrator(parent_orchestrator) w.start() + w.wait_for_ready(timeout=10) - 
task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + with client.TaskHubGrpcClient() as task_hub_client: + id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.runtime_status == client.OrchestrationStatus.COMPLETED @@ -170,9 +206,10 @@ def orchestrator(ctx: task.OrchestrationContext, _): return [a, b, c] # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() + w.wait_for_ready(timeout=10) # Start the orchestration and immediately raise events to it. task_hub_client = client.TaskHubGrpcClient() @@ -199,16 +236,17 @@ def orchestrator(ctx: task.OrchestrationContext, _): return "timed out" # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() + w.wait_for_ready(timeout=10) # Start the orchestration and immediately raise events to it. 
- task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator) - if raise_event: - task_hub_client.raise_orchestration_event(id, "Approval") - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + with client.TaskHubGrpcClient() as task_hub_client: + id = task_hub_client.schedule_new_orchestration(orchestrator) + if raise_event: + task_hub_client.raise_orchestration_event(id, "Approval") + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) assert state is not None assert state.runtime_status == client.OrchestrationStatus.COMPLETED @@ -224,37 +262,37 @@ def orchestrator(ctx: task.OrchestrationContext, _): return result # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator) - state = task_hub_client.wait_for_orchestration_start(id, timeout=30) - assert state is not None - - # Suspend the orchestration and wait for it to go into the SUSPENDED state - task_hub_client.suspend_orchestration(id) - while state.runtime_status == client.OrchestrationStatus.RUNNING: - time.sleep(0.1) - state = task_hub_client.get_orchestration_state(id) + w.wait_for_ready(timeout=10) + with client.TaskHubGrpcClient() as task_hub_client: + id = task_hub_client.schedule_new_orchestration(orchestrator) + state = task_hub_client.wait_for_orchestration_start(id, timeout=30) assert state is not None - assert state.runtime_status == client.OrchestrationStatus.SUSPENDED - # Raise an event to the orchestration and confirm that it does NOT complete - task_hub_client.raise_orchestration_event(id, "my_event", data=42) - try: - state = task_hub_client.wait_for_orchestration_completion(id, timeout=3) - assert False, "Orchestration should not have 
completed" - except TimeoutError: - pass - - # Resume the orchestration and wait for it to complete - task_hub_client.resume_orchestration(id) - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.serialized_output == json.dumps(42) + # Suspend the orchestration and wait for it to go into the SUSPENDED state + task_hub_client.suspend_orchestration(id) + while state.runtime_status == client.OrchestrationStatus.RUNNING: + time.sleep(0.1) + state = task_hub_client.get_orchestration_state(id) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.SUSPENDED + + # Raise an event to the orchestration and confirm that it does NOT complete + task_hub_client.raise_orchestration_event(id, "my_event", data=42) + try: + state = task_hub_client.wait_for_orchestration_completion(id, timeout=3) + assert False, "Orchestration should not have completed" + except TimeoutError: + pass + + # Resume the orchestration and wait for it to complete + task_hub_client.resume_orchestration(id) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(42) def test_terminate(): @@ -263,27 +301,29 @@ def orchestrator(ctx: task.OrchestrationContext, _): return result # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() + w.wait_for_ready(timeout=10) + with client.TaskHubGrpcClient() as task_hub_client: + id = task_hub_client.schedule_new_orchestration(orchestrator) + state = task_hub_client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == 
client.OrchestrationStatus.RUNNING - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator) - state = task_hub_client.wait_for_orchestration_start(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.RUNNING - - task_hub_client.terminate_orchestration(id, output="some reason for termination") - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.TERMINATED - assert state.serialized_output == json.dumps("some reason for termination") + task_hub_client.terminate_orchestration(id, output="some reason for termination") + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.TERMINATED + assert state.serialized_output == json.dumps("some reason for termination") def test_terminate_recursive(): thread_lock = threading.Lock() activity_counter = 0 - delay_time = 4 # seconds + delay_time = ( + 2 # seconds (already optimized from 4s - don't reduce further as it can leads to failure) + ) def increment(ctx, _): with thread_lock: @@ -303,36 +343,39 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): yield task.when_all(tasks) for recurse in [True, False]: - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_activity(increment) w.add_orchestrator(orchestrator_child) w.add_orchestrator(parent_orchestrator) w.start() + w.wait_for_ready(timeout=10) + with client.TaskHubGrpcClient() as task_hub_client: + instance_id = task_hub_client.schedule_new_orchestration( + parent_orchestrator, input=5 + ) - task_hub_client = client.TaskHubGrpcClient() - instance_id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=5) - - time.sleep(2) - - output = "Recursive termination = {recurse}" - 
task_hub_client.terminate_orchestration(instance_id, output=output, recursive=recurse) - - metadata = task_hub_client.wait_for_orchestration_completion(instance_id, timeout=30) - - assert metadata is not None - assert metadata.runtime_status == client.OrchestrationStatus.TERMINATED - assert metadata.serialized_output == f'"{output}"' - - time.sleep(delay_time) + time.sleep(1) # Brief delay to let orchestrations start - if recurse: - assert activity_counter == 0, ( - "Activity should not have executed with recursive termination" + output = "Recursive termination = {recurse}" + task_hub_client.terminate_orchestration( + instance_id, output=output, recursive=recurse ) - else: - assert activity_counter == 5, ( - "Activity should have executed without recursive termination" + + metadata = task_hub_client.wait_for_orchestration_completion( + instance_id, timeout=30 ) + assert metadata is not None + assert metadata.runtime_status == client.OrchestrationStatus.TERMINATED + assert metadata.serialized_output == f'"{output}"' + time.sleep(delay_time) # Wait for timer to check activity execution + if recurse: + assert activity_counter == 0, ( + "Activity should not have executed with recursive termination" + ) + else: + assert activity_counter == 5, ( + "Activity should have executed without recursive termination" + ) def test_continue_as_new(): @@ -351,9 +394,10 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): return all_results # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() + w.wait_for_ready(timeout=10) task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(orchestrator, input=0) @@ -391,7 +435,7 @@ def orchestrator(ctx: task.OrchestrationContext, counter: int): else: return {"counter": counter, "processed": processed, "all_results": activity_results} - 
with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_activity(double_activity) w.add_orchestrator(orchestrator) w.start() @@ -424,13 +468,13 @@ def test_retry_policies(): child_orch_counter = 0 throw_activity_counter = 0 - # Second setup: With retry policies + # Second setup: With retry policies (minimal delays for faster tests) retry_policy = task.RetryPolicy( - first_retry_interval=timedelta(seconds=1), + first_retry_interval=timedelta(seconds=0.05), # 0.1 → 0.05 (50% faster) max_number_of_attempts=3, backoff_coefficient=1, - max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=30), + max_retry_interval=timedelta(seconds=0.5), # 1 → 0.5 + retry_timeout=timedelta(seconds=2), # 3 → 2 ) def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _): @@ -449,11 +493,12 @@ def throw_activity_with_retry(ctx: task.ActivityContext, _): throw_activity_counter += 1 raise RuntimeError("Kah-BOOOOM!!!") - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(parent_orchestrator_with_retry) w.add_orchestrator(child_orchestrator_with_retry) w.add_activity(throw_activity_with_retry) w.start() + w.wait_for_ready(timeout=10) task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry) @@ -468,19 +513,47 @@ def throw_activity_with_retry(ctx: task.ActivityContext, _): assert throw_activity_counter == 9 assert child_orch_counter == 3 + # Test 2: Verify NonRetryableError prevents retries even with retry policy + non_retryable_counter = 0 + + def throw_non_retryable(ctx: task.ActivityContext, _): + nonlocal non_retryable_counter + non_retryable_counter += 1 + raise task.NonRetryableError("Cannot retry this!") + + def orchestrator_with_non_retryable(ctx: task.OrchestrationContext, _): + # Even with retry policy, NonRetryableError should fail immediately + yield 
ctx.call_activity(throw_non_retryable, retry_policy=retry_policy) + + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator_with_non_retryable) + w.add_activity(throw_non_retryable) + w.start() + w.wait_for_ready(timeout=10) + + task_hub_client = client.TaskHubGrpcClient() + id = task_hub_client.schedule_new_orchestration(orchestrator_with_non_retryable) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.FAILED + assert state.failure_details is not None + assert "Cannot retry this!" in state.failure_details.message + # Key assertion: activity was called exactly once (no retries) + assert non_retryable_counter == 1 + def test_retry_timeout(): # This test verifies that the retry timeout is working as expected. - # Max number of attempts is 5 and retry timeout is 14 seconds. - # Total seconds consumed till 4th attempt is 1 + 2 + 4 + 8 = 15 seconds. - # So, the 5th attempt should not be made and the orchestration should fail. + # Max number of attempts is 5 and retry timeout is 1.7 seconds. + # Delays: 0.25 + 0.5 + 1.0 = 1.75 seconds cumulative before 4th attempt. + # So, the 5th attempt (which would happen at 1.75s) should not be made. 
throw_activity_counter = 0 retry_policy = task.RetryPolicy( first_retry_interval=timedelta(seconds=1), max_number_of_attempts=5, backoff_coefficient=2, max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=14), + retry_timeout=timedelta(seconds=13), # Set just before 4th attempt ) def mock_orchestrator(ctx: task.OrchestrationContext, _): @@ -491,10 +564,11 @@ def throw_activity(ctx: task.ActivityContext, _): throw_activity_counter += 1 raise RuntimeError("Kah-BOOOOM!!!") - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(mock_orchestrator) w.add_activity(throw_activity) w.start() + w.wait_for_ready(timeout=10) task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(mock_orchestrator) @@ -513,7 +587,7 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): ctx.set_custom_status("foobaz") # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(empty_orchestrator) w.start() diff --git a/tests/durabletask/test_orchestration_e2e_async.py b/tests/durabletask/test_orchestration_e2e_async.py index c441bdc..b71e70b 100644 --- a/tests/durabletask/test_orchestration_e2e_async.py +++ b/tests/durabletask/test_orchestration_e2e_async.py @@ -110,7 +110,7 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): return error_msg # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.add_activity(throw) w.add_activity(increment_counter) @@ -153,7 +153,7 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): yield task.when_all(tasks) # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + 
with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_activity(increment) w.add_orchestrator(orchestrator_child) w.add_orchestrator(parent_orchestrator) @@ -178,7 +178,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): return [a, b, c] # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() @@ -208,7 +208,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): return "timed out" # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() @@ -234,7 +234,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): return result # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() # there could be a race condition if the workflow is scheduled before orchestrator is started @@ -275,7 +275,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): return result # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() @@ -302,7 +302,7 @@ def child(ctx: task.OrchestrationContext, _): return result # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(root) w.add_orchestrator(child) w.start() @@ -345,7 +345,7 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): return all_results # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with 
worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() @@ -376,13 +376,13 @@ async def test_retry_policies(): child_orch_counter = 0 throw_activity_counter = 0 - # Second setup: With retry policies + # Second setup: With retry policies (minimal delays for faster tests) retry_policy = task.RetryPolicy( - first_retry_interval=timedelta(seconds=1), + first_retry_interval=timedelta(seconds=0.05), # 0.1 → 0.05 (50% faster) max_number_of_attempts=3, backoff_coefficient=1, - max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=30), + max_retry_interval=timedelta(seconds=0.5), # 1 → 0.5 + retry_timeout=timedelta(seconds=2), # 3 → 2 ) def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _): @@ -401,7 +401,7 @@ def throw_activity_with_retry(ctx: task.ActivityContext, _): throw_activity_counter += 1 raise RuntimeError("Kah-BOOOOM!!!") - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(parent_orchestrator_with_retry) w.add_orchestrator(child_orchestrator_with_retry) w.add_activity(throw_activity_with_retry) @@ -423,16 +423,16 @@ def throw_activity_with_retry(ctx: task.ActivityContext, _): async def test_retry_timeout(): # This test verifies that the retry timeout is working as expected. - # Max number of attempts is 5 and retry timeout is 14 seconds. - # Total seconds consumed till 4th attempt is 1 + 2 + 4 + 8 = 15 seconds. - # So, the 5th attempt should not be made and the orchestration should fail. + # Max number of attempts is 5 and retry timeout is 1.7 seconds. + # Delays: 0.25 + 0.5 + 1.0 = 1.75 seconds cumulative before 4th attempt. + # So, the 5th attempt (which would happen at 1.75s) should not be made. 
throw_activity_counter = 0 retry_policy = task.RetryPolicy( first_retry_interval=timedelta(seconds=1), max_number_of_attempts=5, backoff_coefficient=2, max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=14), + retry_timeout=timedelta(seconds=13), # Set just before 4th attempt ) def mock_orchestrator(ctx: task.OrchestrationContext, _): @@ -443,7 +443,7 @@ def throw_activity(ctx: task.ActivityContext, _): throw_activity_counter += 1 raise RuntimeError("Kah-BOOOOM!!!") - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(mock_orchestrator) w.add_activity(throw_activity) w.start() @@ -465,7 +465,7 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): ctx.set_custom_status("foobaz") # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(empty_orchestrator) w.start() diff --git a/tests/durabletask/test_orchestration_executor.py b/tests/durabletask/test_orchestration_executor.py index 964512f..bf81f26 100644 --- a/tests/durabletask/test_orchestration_executor.py +++ b/tests/durabletask/test_orchestration_executor.py @@ -3,7 +3,7 @@ import json import logging -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone import pytest @@ -826,7 +826,7 @@ def test_nondeterminism_expected_sub_orchestration_task_completion_wrong_task_ty def orchestrator(ctx: task.OrchestrationContext, _): result = yield ctx.create_timer( - datetime.utcnow() + datetime.now(timezone.utc) ) # created timer but history expects sub-orchestration return result @@ -920,7 +920,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Complete the timer task. The orchestration should move to the wait_for_external_event step, which # should then complete immediately because the event was buffered in the old event history. 
- timer_due_time = datetime.utcnow() + timedelta(days=1) + timer_due_time = datetime.now(timezone.utc) + timedelta(days=1) old_events = new_events + [helpers.new_timer_created_event(1, timer_due_time)] new_events = [helpers.new_timer_fired_event(1, timer_due_time)] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -1013,9 +1013,9 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): helpers.new_event_raised_event("my_event", encoded_input="42"), helpers.new_event_raised_event("my_event", encoded_input="43"), helpers.new_event_raised_event("my_event", encoded_input="44"), - helpers.new_timer_created_event(1, datetime.utcnow() + timedelta(days=1)), + helpers.new_timer_created_event(1, datetime.now(timezone.utc) + timedelta(days=1)), ] - new_events = [helpers.new_timer_fired_event(1, datetime.utcnow() + timedelta(days=1))] + new_events = [helpers.new_timer_fired_event(1, datetime.now(timezone.utc) + timedelta(days=1))] # Execute the orchestration. It should be in a running state waiting for the timer to fire executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -1447,6 +1447,261 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert str(ex) in complete_action.failureDetails.errorMessage +def test_activity_non_retryable_default_exception(): + """If activity fails with NonRetryableError, it should not be retried and orchestration should fail immediately.""" + + def dummy_activity(ctx, _): + raise task.NonRetryableError("boom") + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_activity( + dummy_activity, + retry_policy=task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + backoff_coefficient=1, + ), + ) + + registry = worker._Registry() + name = registry.add_orchestrator(orchestrator) + + current_timestamp = datetime.utcnow() + old_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_execution_started_event(name, 
TEST_INSTANCE_ID, encoded_input=None), + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] + new_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_task_failed_event(1, task.NonRetryableError("boom")), + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) + actions = result.actions + complete_action = get_and_validate_single_complete_orchestration_action(actions) + assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED + assert complete_action.failureDetails.errorMessage.__contains__("Activity task #1 failed: boom") + + +def test_activity_non_retryable_policy_name(): + """If policy marks ValueError as non-retryable (by name), fail immediately without retry.""" + + def dummy_activity(ctx, _): + raise ValueError("boom") + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_activity( + dummy_activity, + retry_policy=task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=5, + non_retryable_error_types=["ValueError"], + ), + ) + + registry = worker._Registry() + name = registry.add_orchestrator(orchestrator) + + current_timestamp = datetime.utcnow() + old_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] + new_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_task_failed_event(1, ValueError("boom")), + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) + actions = result.actions + complete_action = get_and_validate_single_complete_orchestration_action(actions) + assert complete_action.orchestrationStatus == 
pb.ORCHESTRATION_STATUS_FAILED + assert complete_action.failureDetails.errorMessage.__contains__("Activity task #1 failed: boom") + + +def test_activity_generic_exception_is_retryable(): + """Verify that generic Exception is retryable by default (not treated as non-retryable).""" + + def dummy_activity(ctx, _): + raise Exception("generic error") + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_activity( + dummy_activity, + retry_policy=task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + backoff_coefficient=1, + ), + ) + + registry = worker._Registry() + name = registry.add_orchestrator(orchestrator) + + current_timestamp = datetime.utcnow() + # First attempt fails + old_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] + new_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_task_failed_event(1, Exception("generic error")), + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) + actions = result.actions + # Should schedule a retry timer, not fail immediately + assert len(actions) == 1 + assert actions[0].HasField("createTimer") + assert actions[0].id == 2 + + # Simulate the timer firing and activity being rescheduled + expected_fire_at = current_timestamp + timedelta(seconds=1) + old_events = old_events + new_events + current_timestamp = expected_fire_at + new_events = [ + helpers.new_orchestrator_started_event(current_timestamp), + helpers.new_timer_fired_event(2, current_timestamp), + ] + result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) + actions = result.actions + assert len(actions) == 2 # timer + rescheduled task + assert actions[1].HasField("scheduleTask") + 
assert actions[1].id == 1 + + # Second attempt also fails + old_events = old_events + new_events + new_events = [ + helpers.new_orchestrator_started_event(current_timestamp), + helpers.new_task_failed_event(1, Exception("generic error")), + ] + + result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) + actions = result.actions + # Should schedule another retry timer + assert len(actions) == 3 + assert actions[2].HasField("createTimer") + assert actions[2].id == 3 + + # Simulate the timer firing and activity being rescheduled + expected_fire_at = current_timestamp + timedelta(seconds=1) + old_events = old_events + new_events + current_timestamp = expected_fire_at + new_events = [ + helpers.new_orchestrator_started_event(current_timestamp), + helpers.new_timer_fired_event(3, current_timestamp), + ] + result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) + actions = result.actions + assert len(actions) == 3 # timer + rescheduled task + assert actions[1].HasField("scheduleTask") + assert actions[1].id == 1 + + # Third attempt fails - should exhaust retries + old_events = old_events + new_events + new_events = [ + helpers.new_orchestrator_started_event(current_timestamp), + helpers.new_task_failed_event(1, Exception("generic error")), + ] + + result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) + actions = result.actions + # Now should fail - no more retries + complete_action = get_and_validate_single_complete_orchestration_action(actions) + assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED + assert complete_action.failureDetails.errorMessage.__contains__( + "Activity task #1 failed: generic error" + ) + + +def test_sub_orchestration_non_retryable_default_exception(): + """If sub-orchestrator fails with NonRetryableError, do not retry and fail immediately.""" + + def child(ctx: task.OrchestrationContext, _): + pass + + def parent(ctx: task.OrchestrationContext, _): + yield ctx.call_sub_orchestrator( + 
child, + retry_policy=task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + ), + ) + + registry = worker._Registry() + child_name = registry.add_orchestrator(child) + parent_name = registry.add_orchestrator(parent) + + current_timestamp = datetime.utcnow() + old_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_execution_started_event(parent_name, TEST_INSTANCE_ID, encoded_input=None), + helpers.new_sub_orchestration_created_event(1, child_name, "sub-1", encoded_input=None), + ] + new_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_sub_orchestration_failed_event(1, task.NonRetryableError("boom")), + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) + actions = result.actions + complete_action = get_and_validate_single_complete_orchestration_action(actions) + assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED + assert complete_action.failureDetails.errorMessage.__contains__( + "Sub-orchestration task #1 failed: boom" + ) + + +def test_sub_orchestration_non_retryable_policy_type(): + """If policy marks ValueError as non-retryable (by class), fail immediately without retry.""" + + def child(ctx: task.OrchestrationContext, _): + pass + + def parent(ctx: task.OrchestrationContext, _): + yield ctx.call_sub_orchestrator( + child, + retry_policy=task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=5, + non_retryable_error_types=[ValueError], + ), + ) + + registry = worker._Registry() + child_name = registry.add_orchestrator(child) + parent_name = registry.add_orchestrator(parent) + + current_timestamp = datetime.utcnow() + old_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_execution_started_event(parent_name, TEST_INSTANCE_ID, 
encoded_input=None), + helpers.new_sub_orchestration_created_event(1, child_name, "sub-1", encoded_input=None), + ] + new_events = [ + helpers.new_orchestrator_started_event(timestamp=current_timestamp), + helpers.new_sub_orchestration_failed_event(1, ValueError("boom")), + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) + actions = result.actions + complete_action = get_and_validate_single_complete_orchestration_action(actions) + assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED + assert complete_action.failureDetails.errorMessage.__contains__( + "Sub-orchestration task #1 failed: boom" + ) + + def get_and_validate_single_complete_orchestration_action( actions: list[pb.OrchestratorAction], ) -> pb.CompleteOrchestrationAction: diff --git a/tox.ini b/tox.ini index 9b21313..b6bc7ba 100644 --- a/tox.ini +++ b/tox.ini @@ -10,11 +10,9 @@ runner = virtualenv [testenv] # you can run tox with the e2e pytest marker using tox factors: -# tox -e py310,py311,py312,py313,py314 -- e2e -# or single one with: # tox -e py310-e2e -# to use custom grpc endpoint and not capture print statements (-s arg in pytest): -# DAPR_GRPC_ENDPOINT=localhost:12345 tox -e py310-e2e -- -s +# to use custom grpc endpoint: +# DAPR_GRPC_ENDPOINT=localhost:12345 tox -e py310-e2e setenv = PYTHONDONTWRITEBYTECODE=1 deps = .[dev] From c01d8b338c4d017776d026d405875a0dd9743dea Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Wed, 12 Nov 2025 06:32:41 -0600 Subject: [PATCH 64/81] lint Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/task.py | 6 +++--- durabletask/worker.py | 11 +++++++---- tests/durabletask/test_client.py | 2 +- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/durabletask/task.py b/durabletask/task.py index c24decf..0b27b6f 100644 --- a/durabletask/task.py +++ 
b/durabletask/task.py @@ -244,13 +244,13 @@ class NonRetryableError(Exception): def is_error_non_retryable(error_type: str, policy: RetryPolicy) -> bool: - """ Checks whether an error type is non-retryable.""" + """Checks whether an error type is non-retryable.""" is_non_retryable = False if error_type == "NonRetryableError": is_non_retryable = True elif ( - policy.non_retryable_error_types is not None - and error_type in policy.non_retryable_error_types + policy.non_retryable_error_types is not None + and error_type in policy.non_retryable_error_types ): is_non_retryable = True return is_non_retryable diff --git a/durabletask/worker.py b/durabletask/worker.py index d65e669..4be96c9 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -21,7 +21,6 @@ import durabletask.internal.shared as shared from durabletask import task from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl -from durabletask.task import RetryPolicy TInput = TypeVar("TInput") TOutput = TypeVar("TOutput") @@ -448,7 +447,7 @@ def stream_reader(): else: self._logger.warning(f"Unexpected work item type: {request_type}") except grpc.RpcError: - raise # let it be captured/parsed by outer except and avoid noisy log + raise # let it be captured/parsed by outer except and avoid noisy log except Exception as e: self._logger.warning(f"Error in work item stream: {e}") raise e @@ -1144,7 +1143,9 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven if isinstance(activity_task, task.RetryableTask): if activity_task._retry_policy is not None: # Check for non-retryable errors by type name - if task.is_error_non_retryable(event.taskFailed.failureDetails.errorType, activity_task._retry_policy): + if task.is_error_non_retryable( + event.taskFailed.failureDetails.errorType, activity_task._retry_policy + ): activity_task.fail( f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", 
event.taskFailed.failureDetails, @@ -1219,7 +1220,9 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven if isinstance(sub_orch_task, task.RetryableTask): if sub_orch_task._retry_policy is not None: # Check for non-retryable errors by type name - if task.is_error_non_retryable(failedEvent.failureDetails.errorType, sub_orch_task._retry_policy): + if task.is_error_non_retryable( + failedEvent.failureDetails.errorType, sub_orch_task._retry_policy + ): sub_orch_task.fail( f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", failedEvent.failureDetails, diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py index da72cb0..1cc97e4 100644 --- a/tests/durabletask/test_client.py +++ b/tests/durabletask/test_client.py @@ -1,4 +1,4 @@ -from unittest.mock import patch, MagicMock +from unittest.mock import MagicMock, patch from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl from durabletask.internal.shared import get_default_host_address, get_grpc_channel From 1c194d00b6015b883b09554be160706e726dcc69 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Thu, 13 Nov 2025 09:54:30 -0600 Subject: [PATCH 65/81] feedback Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/worker.py | 80 ++++++++------------- tests/durabletask/test_client.py | 15 +++- tests/durabletask/test_orchestration_e2e.py | 16 +---- 3 files changed, 45 insertions(+), 66 deletions(-) diff --git a/durabletask/worker.py b/durabletask/worker.py index 4be96c9..8fcc763 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -253,8 +253,6 @@ def __init__( self._interceptors = None self._async_worker_manager = _AsyncWorkerManager(self._concurrency_options) - # Readiness flag set once the worker has an active stream to the sidecar - self._ready = Event() @property def concurrency_options(self) -> ConcurrencyOptions: 
@@ -357,8 +355,6 @@ def invalidate_connection(): pass current_channel = None current_stub = None - # No longer ready if connection is gone - self._ready.clear() def should_invalidate_connection(rpc_error): error_code = rpc_error.code() # type: ignore @@ -398,8 +394,6 @@ def should_invalidate_connection(rpc_error): self._logger.info( f"Successfully connected to {self._host_address}. Waiting for work items..." ) - # Signal readiness once stream is established - self._ready.set() # Use a thread to read from the blocking gRPC stream and forward to asyncio import queue @@ -508,14 +502,34 @@ def stream_reader(): self._async_worker_manager.shutdown() self._logger.info("Worker shutdown completed") self._is_running = False - self._ready.clear() - def wait_for_ready(self, timeout: Optional[float] = None) -> bool: - """Block until the worker has an active connection to the sidecar. - - Returns True if the worker became ready within the timeout; otherwise False. - """ - return self._ready.wait(timeout) + def _handle_grpc_execution_error(self, rpc_error: grpc.RpcError, request_type: str): + """Handle a gRPC execution error during shutdown or benign condition.""" + # During shutdown or if the instance was terminated, the channel may be closed + # or the instance may no longer be recognized by the sidecar. Treat these as benign + # to reduce noisy logging when shutting down.
+ details = str(rpc_error).lower() + benign_errors = { + grpc.StatusCode.CANCELLED, + grpc.StatusCode.UNAVAILABLE, + grpc.StatusCode.UNKNOWN, + } + if ( + self._shutdown.is_set() + and rpc_error.code() in benign_errors + or ( + "unknown instance id/task id combo" in details + or "channel closed" in details + or "locally cancelled by application" in details + ) + ): + self._logger.debug( + f"Ignoring gRPC {request_type} execution error during shutdown/benign condition: {rpc_error}" + ) + else: + self._logger.exception( + f"Failed to execute gRPC {request_type} execution error: {rpc_error}" + ) def _execute_orchestrator( self, @@ -551,24 +565,7 @@ def _execute_orchestrator( try: stub.CompleteOrchestratorTask(res) except grpc.RpcError as rpc_error: # type: ignore - # During shutdown or if the instance was terminated, the channel may be closed - # or the instance may no longer be recognized by the sidecar. Treat these as benign - # to reduce noisy logging when shutting down. - code = rpc_error.code() # type: ignore - details = str(rpc_error) - benign = ( - code in {grpc.StatusCode.CANCELLED, grpc.StatusCode.UNAVAILABLE} - or "unknown instance ID/task ID combo" in details - or "Channel closed" in details - ) - if self._shutdown.is_set() or benign: - self._logger.debug( - f"Ignoring orchestrator completion delivery error during shutdown/benign condition: {rpc_error}" - ) - else: - self._logger.exception( - f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {rpc_error}" - ) + self._handle_grpc_execution_error(rpc_error, "orchestrator") except Exception as ex: self._logger.exception( f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}" @@ -601,26 +598,7 @@ def _execute_activity( try: stub.CompleteActivityTask(res) except grpc.RpcError as rpc_error: # type: ignore - # Treat common shutdown/termination races as benign to avoid noisy logs - code = rpc_error.code() # type: ignore - details = str(rpc_error) - benign = code 
in { - grpc.StatusCode.CANCELLED, - grpc.StatusCode.UNAVAILABLE, - grpc.StatusCode.UNKNOWN, - } and ( - "unknown instance ID/task ID combo" in details - or "Channel closed" in details - or "Locally cancelled by application" in details - ) - if self._shutdown.is_set() or benign: - self._logger.debug( - f"Ignoring activity completion delivery error during shutdown/benign condition: {rpc_error}" - ) - else: - self._logger.exception( - f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {rpc_error}" - ) + self._handle_grpc_execution_error(rpc_error, "activity") except Exception as ex: self._logger.exception( f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}" diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py index 1cc97e4..c74ba17 100644 --- a/tests/durabletask/test_client.py +++ b/tests/durabletask/test_client.py @@ -1,5 +1,6 @@ from unittest.mock import MagicMock, patch +from durabletask import client from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl from durabletask.internal.shared import get_default_host_address, get_grpc_channel @@ -149,8 +150,18 @@ def test_taskhub_client_close_handles_exceptions(): mock_channel.close.side_effect = Exception("close failed") mock_get_channel.return_value = mock_channel - from durabletask import client - task_hub_client = client.TaskHubGrpcClient() # Should not raise exception task_hub_client.close() + + +def test_taskhub_client_close_closes_channel_handles_exceptions(): + """Test that close() closes the channel and handles exceptions gracefully.""" + with patch("durabletask.internal.shared.get_grpc_channel") as mock_get_channel: + mock_channel = MagicMock() + mock_channel.close.side_effect = Exception("close failed") + mock_get_channel.return_value = mock_channel + + task_hub_client = client.TaskHubGrpcClient() + task_hub_client.close() + 
mock_channel.close.assert_called_once() diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index b64e446..5f4ca1c 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -58,7 +58,6 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): with worker.TaskHubGrpcWorker(channel_options=channel_options) as w: w.add_orchestrator(empty_orchestrator) w.start() - w.wait_for_ready(timeout=10) # set a custom max send length option c = client.TaskHubGrpcClient(channel_options=channel_options) @@ -98,7 +97,6 @@ def sequence(ctx: task.OrchestrationContext, start_val: int): w.add_orchestrator(sequence) w.add_activity(plus_one) w.start() - w.wait_for_ready(timeout=10) with client.TaskHubGrpcClient() as task_hub_client: id = task_hub_client.schedule_new_orchestration(sequence, input=1) @@ -143,7 +141,6 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): w.add_activity(throw) w.add_activity(increment_counter) w.start() - w.wait_for_ready(timeout=10) with client.TaskHubGrpcClient() as task_hub_client: id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) @@ -186,7 +183,6 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): w.add_orchestrator(orchestrator_child) w.add_orchestrator(parent_orchestrator) w.start() - w.wait_for_ready(timeout=10) with client.TaskHubGrpcClient() as task_hub_client: id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) @@ -209,7 +205,6 @@ def orchestrator(ctx: task.OrchestrationContext, _): with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() - w.wait_for_ready(timeout=10) # Start the orchestration and immediately raise events to it. 
task_hub_client = client.TaskHubGrpcClient() @@ -239,7 +234,6 @@ def orchestrator(ctx: task.OrchestrationContext, _): with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() - w.wait_for_ready(timeout=10) # Start the orchestration and immediately raise events to it. with client.TaskHubGrpcClient() as task_hub_client: @@ -265,7 +259,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() - w.wait_for_ready(timeout=10) + with client.TaskHubGrpcClient() as task_hub_client: id = task_hub_client.schedule_new_orchestration(orchestrator) state = task_hub_client.wait_for_orchestration_start(id, timeout=30) @@ -304,7 +298,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() - w.wait_for_ready(timeout=10) + with client.TaskHubGrpcClient() as task_hub_client: id = task_hub_client.schedule_new_orchestration(orchestrator) state = task_hub_client.wait_for_orchestration_start(id, timeout=30) @@ -348,7 +342,7 @@ def parent_orchestrator(ctx: task.OrchestrationContext, count: int): w.add_orchestrator(orchestrator_child) w.add_orchestrator(parent_orchestrator) w.start() - w.wait_for_ready(timeout=10) + with client.TaskHubGrpcClient() as task_hub_client: instance_id = task_hub_client.schedule_new_orchestration( parent_orchestrator, input=5 @@ -397,7 +391,6 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: w.add_orchestrator(orchestrator) w.start() - w.wait_for_ready(timeout=10) task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(orchestrator, input=0) @@ -498,7 +491,6 @@ def throw_activity_with_retry(ctx: task.ActivityContext, _): w.add_orchestrator(child_orchestrator_with_retry) w.add_activity(throw_activity_with_retry) w.start() - 
w.wait_for_ready(timeout=10) task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry) @@ -529,7 +521,6 @@ def orchestrator_with_non_retryable(ctx: task.OrchestrationContext, _): w.add_orchestrator(orchestrator_with_non_retryable) w.add_activity(throw_non_retryable) w.start() - w.wait_for_ready(timeout=10) task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(orchestrator_with_non_retryable) @@ -568,7 +559,6 @@ def throw_activity(ctx: task.ActivityContext, _): w.add_orchestrator(mock_orchestrator) w.add_activity(throw_activity) w.start() - w.wait_for_ready(timeout=10) task_hub_client = client.TaskHubGrpcClient() id = task_hub_client.schedule_new_orchestration(mock_orchestrator) From 6697ef374f85048d6b8f22ac10d84d7744dbf53e Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Thu, 13 Nov 2025 10:01:04 -0600 Subject: [PATCH 66/81] feedback, missing canceled status Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- durabletask/client.py | 1 + tests/durabletask/test_orchestration_e2e.py | 1 + 2 files changed, 2 insertions(+) diff --git a/durabletask/client.py b/durabletask/client.py index ff4b326..e3d391f 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -32,6 +32,7 @@ class OrchestrationStatus(Enum): CONTINUED_AS_NEW = pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW PENDING = pb.ORCHESTRATION_STATUS_PENDING SUSPENDED = pb.ORCHESTRATION_STATUS_SUSPENDED + CANCELED = pb.ORCHESTRATION_STATUS_CANCELED def __str__(self): return helpers.get_orchestration_status_str(self.value) diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index 5f4ca1c..9debf39 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -36,6 +36,7 @@ def _wait_until_terminal( client.OrchestrationStatus.COMPLETED, 
client.OrchestrationStatus.FAILED, client.OrchestrationStatus.TERMINATED, + client.OrchestrationStatus.CANCELED, ): return st time.sleep(delay) From 94d593e2bcae9e160f2f9ff9a808bca865dd7eef Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Fri, 14 Nov 2025 08:13:09 -0600 Subject: [PATCH 67/81] add deterministic methods and increase test coverage Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- Makefile | 9 +- README.md | 32 ++ durabletask/deterministic.py | 224 ++++++++++ durabletask/worker.py | 7 +- tests/durabletask/test_deterministic.py | 455 ++++++++++++++++++++ tests/durabletask/test_orchestration_e2e.py | 179 ++++++++ tests/durabletask/test_registry.py | 154 +++++++ 7 files changed, 1057 insertions(+), 3 deletions(-) create mode 100644 durabletask/deterministic.py create mode 100644 tests/durabletask/test_deterministic.py create mode 100644 tests/durabletask/test_registry.py diff --git a/Makefile b/Makefile index 69daa40..3a387b0 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,13 @@ test-unit: test-e2e: pytest -m e2e --verbose +coverage-clean: + rm -f .coverage .coverage.* coverage.xml + +coverage-all: coverage-clean + pytest -m "not e2e" --durations=0 --cov=durabletask --cov-branch --cov-report=term-missing --cov-report=xml + pytest -m e2e --durations=0 --cov=durabletask --cov-branch --cov-report=term-missing --cov-report=xml --cov-append + install: python3 -m pip install . @@ -18,4 +25,4 @@ gen-proto: python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. 
./durabletask/internal/orchestrator_service.proto rm durabletask/internal/*.proto -.PHONY: init test-unit test-e2e gen-proto install +.PHONY: init test-unit test-e2e coverage-clean coverage-unit coverage-e2e coverage-all gen-proto install diff --git a/README.md b/README.md index 40a4e6e..d4604e0 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,38 @@ This repo contains a Python client SDK for use with the [Durable Task Framework > Note that this project is **not** currently affiliated with the [Durable Functions](https://docs.microsoft.com/azure/azure-functions/durable/durable-functions-overview) project for Azure Functions. If you are looking for a Python SDK for Durable Functions, please see [this repo](https://github.com/Azure/azure-functions-durable-python). +## Minimal worker setup + +To execute orchestrations and activities you must run a worker that connects to the Dapr Workflow sidecar and dispatches work on background threads: + +```python +from durabletask.worker import TaskHubGrpcWorker + +worker = TaskHubGrpcWorker(host_address="localhost:4001") + +worker.add_orchestrator(say_hello) +worker.add_activity(hello_activity) + +try: + worker.start() + # Worker runs in the background and processes work until stopped +finally: + worker.stop() +``` + +Always stop the worker when you're finished. The worker keeps polling threads alive; if you skip `stop()` they continue running and can prevent your process from shutting down cleanly after failures. You can rely on the context manager form to guarantee cleanup: + +```python +from durabletask.worker import TaskHubGrpcWorker + +with TaskHubGrpcWorker(host_address="localhost:4001") as worker: + worker.add_orchestrator(say_hello) + worker.add_activity(hello_activity) + worker.start() + # worker.stop() is called automatically on exit +``` + + ## Supported patterns The following orchestration patterns are currently supported. 
diff --git a/durabletask/deterministic.py b/durabletask/deterministic.py new file mode 100644 index 0000000..2943783 --- /dev/null +++ b/durabletask/deterministic.py @@ -0,0 +1,224 @@ +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +""" +Deterministic utilities for Durable Task workflows (async and generator). + +This module provides deterministic alternatives to non-deterministic Python +functions, ensuring workflow replay consistency across different executions. +It is shared by both the asyncio authoring model and the generator-based model. +""" + +import hashlib +import random +import string as _string +import uuid +from collections.abc import Sequence +from dataclasses import dataclass +from datetime import datetime, timedelta +from typing import Optional, TypeVar + + +@dataclass +class DeterminismSeed: + """Seed data for deterministic operations.""" + + instance_id: str + orchestration_unix_ts: int + + def to_int(self) -> int: + """Convert seed to integer for PRNG initialization.""" + combined = f"{self.instance_id}:{self.orchestration_unix_ts}" + hash_bytes = hashlib.sha256(combined.encode("utf-8")).digest() + return int.from_bytes(hash_bytes[:8], byteorder="big") + + +def derive_seed(instance_id: str, orchestration_time: datetime) -> int: + """ + Derive a deterministic seed from instance ID and orchestration time. 
+ """ + ts = int(orchestration_time.timestamp()) + return DeterminismSeed(instance_id=instance_id, orchestration_unix_ts=ts).to_int() + + +def deterministic_random(instance_id: str, orchestration_time: datetime) -> random.Random: + """ + Create a deterministic random number generator. + """ + seed = derive_seed(instance_id, orchestration_time) + return random.Random(seed) + + +def deterministic_uuid4(rnd: random.Random) -> uuid.UUID: + """ + Generate a deterministic UUID4 using the provided random generator. + + Note: This is deprecated in favor of deterministic_uuid_v5 which matches + the .NET implementation for cross-language compatibility. + """ + bytes_ = bytes(rnd.randrange(0, 256) for _ in range(16)) + bytes_list = list(bytes_) + bytes_list[6] = (bytes_list[6] & 0x0F) | 0x40 # Version 4 + bytes_list[8] = (bytes_list[8] & 0x3F) | 0x80 # Variant bits + return uuid.UUID(bytes=bytes(bytes_list)) + + +def deterministic_uuid_v5(instance_id: str, current_datetime: datetime, counter: int) -> uuid.UUID: + """ + Generate a deterministic UUID v5 matching the .NET implementation. + + This implementation matches the durabletask-dotnet NewGuid() method: + https://github.com/microsoft/durabletask-dotnet/blob/main/src/Worker/Core/Shims/TaskOrchestrationContextWrapper.cs + + Args: + instance_id: The orchestration instance ID. + current_datetime: The current orchestration datetime (frozen during replay). + counter: The per-call counter (starts at 0 on each replay). + + Returns: + A deterministic UUID v5 that will be the same across replays. 
+ """ + # DNS namespace UUID - same as .NET DnsNamespaceValue + namespace = uuid.UUID("9e952958-5e33-4daf-827f-2fa12937b875") + + # Build name matching .NET format: instanceId_datetime_counter + # Using isoformat() which produces ISO 8601 format similar to .NET's ToString("o") + name = f"{instance_id}_{current_datetime.isoformat()}_{counter}" + + # Generate UUID v5 (SHA-1 based, matching .NET) + return uuid.uuid5(namespace, name) + + +class DeterministicContextMixin: + """ + Mixin providing deterministic helpers for workflow contexts. + + Assumes the inheriting class exposes `instance_id` and `current_utc_datetime` attributes. + + This implementation matches the .NET durabletask SDK approach with an explicit + counter for UUID generation that resets on each replay. + """ + + def __init__(self, *args, **kwargs): + """Initialize the mixin with UUID and timestamp counters.""" + super().__init__(*args, **kwargs) + # Counter for deterministic UUID generation (matches .NET newGuidCounter) + # This counter resets to 0 on each replay, ensuring determinism + self._uuid_counter: int = 0 + # Counter for deterministic timestamp sequencing (resets on replay) + self._timestamp_counter: int = 0 + + def now(self) -> datetime: + """Alias for deterministic current_utc_datetime.""" + return self.current_utc_datetime # type: ignore[attr-defined] + + def random(self) -> random.Random: + """Return a PRNG seeded deterministically from instance id and orchestration time.""" + rnd = deterministic_random( + self.instance_id, # type: ignore[attr-defined] + self.current_utc_datetime, # type: ignore[attr-defined] + ) + # Mark as deterministic for asyncio sandbox detector whitelisting of bound methods (randint, random) + try: + rnd._dt_deterministic = True + except Exception: + pass + return rnd + + def uuid4(self) -> uuid.UUID: + """ + Return a deterministically generated UUID v5 with explicit counter. 
+ https://www.sohamkamani.com/uuid-versions-explained/#v5-non-random-uuids + + This matches the .NET implementation's NewGuid() method which uses: + - Instance ID + - Current UTC datetime (frozen during replay) + - Per-call counter (resets to 0 on each replay) + + The counter ensures multiple calls produce different UUIDs while maintaining + determinism across replays. + """ + # Lazily initialize counter if not set by __init__ (for compatibility) + if not hasattr(self, "_uuid_counter"): + self._uuid_counter = 0 + + result = deterministic_uuid_v5( + self.instance_id, # type: ignore[attr-defined] + self.current_utc_datetime, # type: ignore[attr-defined] + self._uuid_counter, + ) + self._uuid_counter += 1 + return result + + def new_guid(self) -> uuid.UUID: + """Alias for uuid4 for API parity with other SDKs.""" + return self.uuid4() + + def random_string(self, length: int, *, alphabet: Optional[str] = None) -> str: + """Return a deterministically generated random string of the given length.""" + if length < 0: + raise ValueError("length must be non-negative") + chars = alphabet if alphabet is not None else (_string.ascii_letters + _string.digits) + if not chars: + raise ValueError("alphabet must not be empty") + rnd = self.random() + size = len(chars) + return "".join(chars[rnd.randrange(0, size)] for _ in range(length)) + + def random_int(self, min_value: int = 0, max_value: int = 2**31 - 1) -> int: + """Return a deterministic random integer in the specified range.""" + if min_value > max_value: + raise ValueError("min_value must be <= max_value") + rnd = self.random() + return rnd.randint(min_value, max_value) + + T = TypeVar("T") + + def random_choice(self, sequence: Sequence[T]) -> T: + """Return a deterministic random element from a non-empty sequence.""" + if not sequence: + raise IndexError("Cannot choose from empty sequence") + rnd = self.random() + return rnd.choice(sequence) + + def now_with_sequence(self) -> datetime: + """ + Return deterministic timestamp 
with microsecond increment per call. + + Each call returns: current_utc_datetime + (counter * 1 microsecond) + + This provides ordered, unique timestamps for tracing/telemetry while maintaining + determinism across replays. The counter resets to 0 on each replay (similar to + _uuid_counter pattern). + + Perfect for preserving event ordering within a workflow without requiring activities. + + Returns: + datetime: Deterministic timestamp that increments on each call + + Example: + ```python + def workflow(ctx): + t1 = ctx.now_with_sequence() # 2024-01-01 12:00:00.000000 + result = yield ctx.call_activity(some_activity, input="data") + t2 = ctx.now_with_sequence() # 2024-01-01 12:00:00.000001 + # t1 < t2, preserving order for telemetry + ``` + """ + offset = timedelta(microseconds=self._timestamp_counter) + self._timestamp_counter += 1 + return self.current_utc_datetime + offset # type: ignore[attr-defined] + + def current_utc_datetime_with_sequence(self): + """Alias for now_with_sequence for API parity with other SDKs.""" + return self.now_with_sequence() diff --git a/durabletask/worker.py b/durabletask/worker.py index 8fcc763..29d67fc 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -19,7 +19,7 @@ import durabletask.internal.orchestrator_service_pb2 as pb import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared -from durabletask import task +from durabletask import deterministic, task from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl TInput = TypeVar("TInput") @@ -605,11 +605,14 @@ def _execute_activity( ) -class _RuntimeOrchestrationContext(task.OrchestrationContext): +class _RuntimeOrchestrationContext( + task.OrchestrationContext, deterministic.DeterministicContextMixin +): _generator: Optional[Generator[task.Task, Any, Any]] _previous_task: Optional[task.Task] def __init__(self, instance_id: str): + super().__init__() self._generator = None self._is_replaying = 
True self._is_complete = False diff --git a/tests/durabletask/test_deterministic.py b/tests/durabletask/test_deterministic.py new file mode 100644 index 0000000..f8f3acf --- /dev/null +++ b/tests/durabletask/test_deterministic.py @@ -0,0 +1,455 @@ +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import random +import uuid +from datetime import datetime, timezone + +import pytest + +from durabletask.deterministic import ( + DeterminismSeed, + derive_seed, + deterministic_random, + deterministic_uuid4, + deterministic_uuid_v5, +) +from durabletask.worker import _RuntimeOrchestrationContext + + +class TestDeterminismSeed: + """Test DeterminismSeed dataclass and its methods.""" + + def test_to_int_produces_consistent_result(self): + """Test that to_int produces the same result for same inputs.""" + seed1 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + seed2 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + assert seed1.to_int() == seed2.to_int() + + def test_to_int_produces_different_results_for_different_instance_ids(self): + """Test that different instance IDs produce different seeds.""" + seed1 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + seed2 = DeterminismSeed(instance_id="test-456", orchestration_unix_ts=1234567890) + assert seed1.to_int() != seed2.to_int() + + def test_to_int_produces_different_results_for_different_timestamps(self): + """Test that different 
timestamps produce different seeds.""" + seed1 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + seed2 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567891) + assert seed1.to_int() != seed2.to_int() + + def test_to_int_returns_positive_integer(self): + """Test that to_int returns a positive integer.""" + seed = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + result = seed.to_int() + assert isinstance(result, int) + assert result >= 0 + + +class TestDeriveSeed: + """Test derive_seed function.""" + + def test_derive_seed_is_deterministic(self): + """Test that derive_seed produces consistent results.""" + instance_id = "test-instance" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + seed1 = derive_seed(instance_id, dt) + seed2 = derive_seed(instance_id, dt) + assert seed1 == seed2 + + def test_derive_seed_different_for_different_times(self): + """Test that different times produce different seeds.""" + instance_id = "test-instance" + dt1 = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + dt2 = datetime(2025, 1, 1, 12, 0, 1, tzinfo=timezone.utc) + seed1 = derive_seed(instance_id, dt1) + seed2 = derive_seed(instance_id, dt2) + assert seed1 != seed2 + + def test_derive_seed_handles_timezone_aware_datetime(self): + """Test that derive_seed works with timezone-aware datetimes.""" + instance_id = "test-instance" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + seed = derive_seed(instance_id, dt) + assert isinstance(seed, int) + + +class TestDeterministicRandom: + """Test deterministic_random function.""" + + def test_deterministic_random_returns_random_object(self): + """Test that deterministic_random returns a Random instance.""" + instance_id = "test-instance" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + rnd = deterministic_random(instance_id, dt) + assert isinstance(rnd, random.Random) + + def test_deterministic_random_produces_same_sequence(self): + 
"""Test that same inputs produce same random sequence.""" + instance_id = "test-instance" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + rnd1 = deterministic_random(instance_id, dt) + rnd2 = deterministic_random(instance_id, dt) + + sequence1 = [rnd1.random() for _ in range(10)] + sequence2 = [rnd2.random() for _ in range(10)] + assert sequence1 == sequence2 + + def test_deterministic_random_different_for_different_inputs(self): + """Test that different inputs produce different sequences.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + rnd1 = deterministic_random("instance-1", dt) + rnd2 = deterministic_random("instance-2", dt) + + val1 = rnd1.random() + val2 = rnd2.random() + assert val1 != val2 + + +class TestDeterministicUuid4: + """Test deterministic_uuid4 function.""" + + def test_deterministic_uuid4_returns_valid_uuid(self): + """Test that deterministic_uuid4 returns a valid UUID4.""" + rnd = random.Random(42) + result = deterministic_uuid4(rnd) + assert isinstance(result, uuid.UUID) + assert result.version == 4 + + def test_deterministic_uuid4_is_deterministic(self): + """Test that same random state produces same UUID.""" + rnd1 = random.Random(42) + rnd2 = random.Random(42) + uuid1 = deterministic_uuid4(rnd1) + uuid2 = deterministic_uuid4(rnd2) + assert uuid1 == uuid2 + + def test_deterministic_uuid4_different_for_different_seeds(self): + """Test that different seeds produce different UUIDs.""" + rnd1 = random.Random(42) + rnd2 = random.Random(43) + uuid1 = deterministic_uuid4(rnd1) + uuid2 = deterministic_uuid4(rnd2) + assert uuid1 != uuid2 + + +class TestDeterministicUuidV5: + """Test deterministic_uuid_v5 function (matching .NET implementation).""" + + def test_deterministic_uuid_v5_returns_valid_uuid(self): + """Test that deterministic_uuid_v5 returns a valid UUID v5.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + result = deterministic_uuid_v5("test-instance", dt, 0) + assert isinstance(result, uuid.UUID) 
+ assert result.version == 5 + + def test_deterministic_uuid_v5_is_deterministic(self): + """Test that same inputs produce same UUID.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + uuid1 = deterministic_uuid_v5("test-instance", dt, 0) + uuid2 = deterministic_uuid_v5("test-instance", dt, 0) + assert uuid1 == uuid2 + + def test_deterministic_uuid_v5_different_for_different_counters(self): + """Test that different counters produce different UUIDs.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + uuid1 = deterministic_uuid_v5("test-instance", dt, 0) + uuid2 = deterministic_uuid_v5("test-instance", dt, 1) + assert uuid1 != uuid2 + + def test_deterministic_uuid_v5_different_for_different_instance_ids(self): + """Test that different instance IDs produce different UUIDs.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + uuid1 = deterministic_uuid_v5("instance-1", dt, 0) + uuid2 = deterministic_uuid_v5("instance-2", dt, 0) + assert uuid1 != uuid2 + + def test_deterministic_uuid_v5_different_for_different_datetimes(self): + """Test that different datetimes produce different UUIDs.""" + dt1 = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + dt2 = datetime(2025, 1, 1, 12, 0, 1, tzinfo=timezone.utc) + uuid1 = deterministic_uuid_v5("test-instance", dt1, 0) + uuid2 = deterministic_uuid_v5("test-instance", dt2, 0) + assert uuid1 != uuid2 + + def test_deterministic_uuid_v5_matches_expected_format(self): + """Test that UUID v5 uses the correct namespace.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + result = deterministic_uuid_v5("test-instance", dt, 0) + # Should be deterministic - same inputs always produce same output + expected = deterministic_uuid_v5("test-instance", dt, 0) + assert result == expected + + def test_deterministic_uuid_v5_counter_sequence(self): + """Test that incrementing counter produces different UUIDs in sequence.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + uuids = 
[deterministic_uuid_v5("test-instance", dt, i) for i in range(5)] + # All should be different + assert len(set(uuids)) == 5 + # But calling with same counter should produce same UUID + assert uuids[0] == deterministic_uuid_v5("test-instance", dt, 0) + assert uuids[4] == deterministic_uuid_v5("test-instance", dt, 4) + + +def mock_deterministic_context( + instance_id: str, current_utc_datetime: datetime +) -> _RuntimeOrchestrationContext: + """Mock context for testing DeterministicContextMixin.""" + ctx = _RuntimeOrchestrationContext(instance_id) + ctx.current_utc_datetime = current_utc_datetime + return ctx + + +class TestDeterministicContextMixin: + """Test DeterministicContextMixin methods.""" + + def test_now_returns_current_utc_datetime(self): + """Test that now() returns the orchestration time.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + assert ctx.now() == dt + + def test_random_returns_deterministic_prng(self): + """Test that random() returns a deterministic PRNG.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + rnd1 = ctx.random() + rnd2 = ctx.random() + + # Both should produce same sequence + assert isinstance(rnd1, random.Random) + assert isinstance(rnd2, random.Random) + assert rnd1.random() == rnd2.random() + + def test_random_has_deterministic_marker(self): + """Test that random() sets _dt_deterministic marker.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + rnd = ctx.random() + assert hasattr(rnd, "_dt_deterministic") + assert rnd._dt_deterministic is True + + def test_uuid4_generates_deterministic_uuid(self): + """Test that uuid4() generates deterministic UUIDs v5 with counter.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx1 = mock_deterministic_context("test-instance", dt) + ctx2 = 
mock_deterministic_context("test-instance", dt) + + uuid1 = ctx1.uuid4() + uuid2 = ctx2.uuid4() + + assert isinstance(uuid1, uuid.UUID) + assert uuid1.version == 5 # Now using UUID v5 like .NET + assert uuid1 == uuid2 # Same counter (0) produces same UUID + + def test_uuid4_increments_counter(self): + """Test that uuid4() increments counter producing different UUIDs.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + uuid1 = ctx.uuid4() # counter=0 + uuid2 = ctx.uuid4() # counter=1 + uuid3 = ctx.uuid4() # counter=2 + + # All should be different due to counter + assert uuid1 != uuid2 + assert uuid2 != uuid3 + assert uuid1 != uuid3 + + def test_uuid4_counter_resets_on_replay(self): + """Test that counter resets on new context (simulating replay).""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + + # First execution + ctx1 = mock_deterministic_context("test-instance", dt) + uuid1_first = ctx1.uuid4() # counter=0 + uuid1_second = ctx1.uuid4() # counter=1 + + # Replay - new context, counter resets + ctx2 = mock_deterministic_context("test-instance", dt) + uuid2_first = ctx2.uuid4() # counter=0 + uuid2_second = ctx2.uuid4() # counter=1 + + # Same counter values produce same UUIDs (determinism!) 
+ assert uuid1_first == uuid2_first + assert uuid1_second == uuid2_second + + def test_new_guid_is_alias_for_uuid4(self): + """Test that new_guid() is an alias for uuid4().""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + guid1 = ctx.new_guid() # counter=0 + guid2 = ctx.uuid4() # counter=1 + + # Both should be v5 UUIDs, but different due to counter increment + assert isinstance(guid1, uuid.UUID) + assert isinstance(guid2, uuid.UUID) + assert guid1.version == 5 + assert guid2.version == 5 + assert guid1 != guid2 # Different due to counter + + # Verify determinism - same counter produces same UUID + ctx2 = mock_deterministic_context("test-instance", dt) + guid3 = ctx2.new_guid() # counter=0 + assert guid3 == guid1 # Same as first call + + def test_random_string_generates_string_of_correct_length(self): + """Test that random_string() generates string of specified length.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + s = ctx.random_string(10) + assert len(s) == 10 + + def test_random_string_is_deterministic(self): + """Test that random_string() produces consistent results.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx1 = mock_deterministic_context("test-instance", dt) + ctx2 = mock_deterministic_context("test-instance", dt) + + s1 = ctx1.random_string(20) + s2 = ctx2.random_string(20) + assert s1 == s2 + + def test_random_string_uses_default_alphabet(self): + """Test that random_string() uses alphanumeric characters by default.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + s = ctx.random_string(100) + assert all(c.isalnum() for c in s) + + def test_random_string_uses_custom_alphabet(self): + """Test that random_string() respects custom alphabet.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = 
mock_deterministic_context("test-instance", dt) + + s = ctx.random_string(50, alphabet="ABC") + assert all(c in "ABC" for c in s) + + def test_random_string_raises_on_negative_length(self): + """Test that random_string() raises ValueError for negative length.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + with pytest.raises(ValueError, match="length must be non-negative"): + ctx.random_string(-1) + + def test_random_string_raises_on_empty_alphabet(self): + """Test that random_string() raises ValueError for empty alphabet.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + with pytest.raises(ValueError, match="alphabet must not be empty"): + ctx.random_string(10, alphabet="") + + def test_random_string_handles_zero_length(self): + """Test that random_string() handles zero length correctly.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + s = ctx.random_string(0) + assert s == "" + + def test_random_int_generates_int_in_range(self): + """Test that random_int() generates integer in specified range.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + for _ in range(10): + val = ctx.random_int(10, 20) + assert 10 <= val <= 20 + + def test_random_int_is_deterministic(self): + """Test that random_int() produces consistent results.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx1 = mock_deterministic_context("test-instance", dt) + ctx2 = mock_deterministic_context("test-instance", dt) + + val1 = ctx1.random_int(0, 1000) + val2 = ctx2.random_int(0, 1000) + assert val1 == val2 + + def test_random_int_uses_default_range(self): + """Test that random_int() uses default range when not specified.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = 
mock_deterministic_context("test-instance", dt) + + val = ctx.random_int() + assert 0 <= val <= 2**31 - 1 + + def test_random_int_raises_on_invalid_range(self): + """Test that random_int() raises ValueError when min > max.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + with pytest.raises(ValueError, match="min_value must be <= max_value"): + ctx.random_int(20, 10) + + def test_random_int_handles_same_min_and_max(self): + """Test that random_int() handles case where min equals max.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + val = ctx.random_int(42, 42) + assert val == 42 + + def test_random_choice_picks_from_sequence(self): + """Test that random_choice() picks element from sequence.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + choices = ["a", "b", "c", "d", "e"] + result = ctx.random_choice(choices) + assert result in choices + + def test_random_choice_is_deterministic(self): + """Test that random_choice() produces consistent results.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx1 = mock_deterministic_context("test-instance", dt) + ctx2 = mock_deterministic_context("test-instance", dt) + + choices = list(range(100)) + result1 = ctx1.random_choice(choices) + result2 = ctx2.random_choice(choices) + assert result1 == result2 + + def test_random_choice_raises_on_empty_sequence(self): + """Test that random_choice() raises IndexError for empty sequence.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + with pytest.raises(IndexError, match="Cannot choose from empty sequence"): + ctx.random_choice([]) + + def test_random_choice_works_with_different_sequence_types(self): + """Test that random_choice() works with various sequence types.""" + dt = 
datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + # List + result = ctx.random_choice([1, 2, 3]) + assert result in [1, 2, 3] + + # Reset context for deterministic behavior + ctx = mock_deterministic_context("test-instance", dt) + # Tuple + result = ctx.random_choice((1, 2, 3)) + assert result in (1, 2, 3) + + # Reset context for deterministic behavior + ctx = mock_deterministic_context("test-instance", dt) + # String + result = ctx.random_choice("abc") + assert result in "abc" diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py index 9debf39..181d71d 100644 --- a/tests/durabletask/test_orchestration_e2e.py +++ b/tests/durabletask/test_orchestration_e2e.py @@ -594,3 +594,182 @@ def empty_orchestrator(ctx: task.OrchestrationContext, _): assert state.serialized_input is None assert state.serialized_output is None assert state.serialized_custom_status == '"foobaz"' + + +def test_now_with_sequence_ordering(): + """ + Test that now_with_sequence() maintains strict ordering across workflow execution. + + This verifies: + 1. Timestamps increment sequentially + 2. Order is preserved across activity calls + 3. 
Deterministic behavior (timestamps are consistent on replay) + """ + + def simple_activity(ctx, input_val: str): + return f"activity_{input_val}_done" + + def timestamp_ordering_workflow(ctx: task.OrchestrationContext, _): + timestamps = [] + + # First timestamp before any activities + t1 = ctx.now_with_sequence() + timestamps.append(("t1_before_activities", t1.isoformat())) + + # Call first activity + result1 = yield ctx.call_activity(simple_activity, input="first") + timestamps.append(("activity_1_result", result1)) + + # Timestamp after first activity + t2 = ctx.now_with_sequence() + timestamps.append(("t2_after_activity_1", t2.isoformat())) + + # Call second activity + result2 = yield ctx.call_activity(simple_activity, input="second") + timestamps.append(("activity_2_result", result2)) + + # Timestamp after second activity + t3 = ctx.now_with_sequence() + timestamps.append(("t3_after_activity_2", t3.isoformat())) + + # A few more rapid timestamps to test counter incrementing + t4 = ctx.now_with_sequence() + timestamps.append(("t4_rapid", t4.isoformat())) + + t5 = ctx.now_with_sequence() + timestamps.append(("t5_rapid", t5.isoformat())) + + # Return all timestamps for verification + return { + "timestamps": timestamps, + "t1": t1.isoformat(), + "t2": t2.isoformat(), + "t3": t3.isoformat(), + "t4": t4.isoformat(), + "t5": t5.isoformat(), + } + + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(timestamp_ordering_workflow) + w.add_activity(simple_activity) + w.start() + + with client.TaskHubGrpcClient() as c: + instance_id = c.schedule_new_orchestration(timestamp_ordering_workflow) + state = c.wait_for_orchestration_completion( + instance_id, timeout=30, fetch_payloads=True + ) + + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.failure_details is None + + # Parse result + result = json.loads(state.serialized_output) + assert result is not None + + # Verify all timestamps are 
present + assert "t1" in result + assert "t2" in result + assert "t3" in result + assert "t4" in result + assert "t5" in result + + # Parse timestamps back to datetime objects for comparison + from datetime import datetime + + t1 = datetime.fromisoformat(result["t1"]) + t2 = datetime.fromisoformat(result["t2"]) + t3 = datetime.fromisoformat(result["t3"]) + t4 = datetime.fromisoformat(result["t4"]) + t5 = datetime.fromisoformat(result["t5"]) + + # Verify strict ordering: t1 < t2 < t3 < t4 < t5 + # This is the key guarantee - timestamps must maintain order for tracing + assert t1 < t2, f"t1 ({t1}) should be < t2 ({t2})" + assert t2 < t3, f"t2 ({t2}) should be < t3 ({t3})" + assert t3 < t4, f"t3 ({t3}) should be < t4 ({t4})" + assert t4 < t5, f"t4 ({t4}) should be < t5 ({t5})" + + # Verify that timestamps called in rapid succession (t3, t4, t5 with no activities between) + # have exactly 1 microsecond deltas. These happen within the same replay execution. + delta_t3_t4 = (t4 - t3).total_seconds() * 1_000_000 + delta_t4_t5 = (t5 - t4).total_seconds() * 1_000_000 + + assert delta_t3_t4 == 1.0, f"t3 to t4 should be 1 microsecond, got {delta_t3_t4}" + assert delta_t4_t5 == 1.0, f"t4 to t5 should be 1 microsecond, got {delta_t4_t5}" + + # Note: We don't check exact deltas for t1->t2 or t2->t3 because they span + # activity calls. During replay, current_utc_datetime changes based on event + # timestamps, so the base time shifts. However, ordering is still guaranteed. 
+ + +def test_cannot_add_orchestrator_while_running(): + """Test that orchestrators cannot be added while the worker is running.""" + + def orchestrator(ctx: task.OrchestrationContext, _): + return "done" + + def another_orchestrator(ctx: task.OrchestrationContext, _): + return "another" + + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.start() + + # Try to add another orchestrator while running + with pytest.raises( + RuntimeError, match="Orchestrators cannot be added while the worker is running" + ): + w.add_orchestrator(another_orchestrator) + + +def test_cannot_add_activity_while_running(): + """Test that activities cannot be added while the worker is running.""" + + def activity(ctx: task.ActivityContext, input): + return input + + def another_activity(ctx: task.ActivityContext, input): + return input * 2 + + def orchestrator(ctx: task.OrchestrationContext, _): + return "done" + + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.add_activity(activity) + w.start() + + # Try to add another activity while running + with pytest.raises( + RuntimeError, match="Activities cannot be added while the worker is running" + ): + w.add_activity(another_activity) + + +def test_can_add_functions_after_stop(): + """Test that orchestrators/activities can be added after stopping the worker.""" + + def orchestrator1(ctx: task.OrchestrationContext, _): + return "done" + + def orchestrator2(ctx: task.OrchestrationContext, _): + return "done2" + + def activity(ctx: task.ActivityContext, input): + return input + + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator1) + w.start() + + c = client.TaskHubGrpcClient() + id = c.schedule_new_orchestration(orchestrator1) + state = c.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + + # Should be able to add after stop + 
w.add_orchestrator(orchestrator2) + w.add_activity(activity) diff --git a/tests/durabletask/test_registry.py b/tests/durabletask/test_registry.py new file mode 100644 index 0000000..edb623f --- /dev/null +++ b/tests/durabletask/test_registry.py @@ -0,0 +1,154 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Unit tests for the _Registry class validation logic.""" + +import pytest + +from durabletask import worker + + +def test_registry_add_orchestrator_none(): + """Test that adding a None orchestrator raises ValueError.""" + registry = worker._Registry() + + with pytest.raises(ValueError, match="An orchestrator function argument is required"): + registry.add_orchestrator(None) + + +def test_registry_add_named_orchestrator_empty_name(): + """Test that adding an orchestrator with empty name raises ValueError.""" + registry = worker._Registry() + + def dummy_orchestrator(ctx, input): + return "done" + + with pytest.raises(ValueError, match="A non-empty orchestrator name is required"): + registry.add_named_orchestrator("", dummy_orchestrator) + + +def test_registry_add_orchestrator_duplicate(): + """Test that adding a duplicate orchestrator raises ValueError.""" + registry = worker._Registry() + + def dummy_orchestrator(ctx, input): + return "done" + + name = "test_orchestrator" + registry.add_named_orchestrator(name, dummy_orchestrator) + + with pytest.raises(ValueError, match=f"A '{name}' orchestrator already exists"): + registry.add_named_orchestrator(name, dummy_orchestrator) + + +def test_registry_add_activity_none(): + """Test that adding a None activity raises ValueError.""" + registry = worker._Registry() + + with pytest.raises(ValueError, match="An activity function argument is required"): + registry.add_activity(None) + + +def test_registry_add_named_activity_empty_name(): + """Test that adding an activity with empty name raises ValueError.""" + registry = worker._Registry() + + def dummy_activity(ctx, input): + return "done" 
+ + with pytest.raises(ValueError, match="A non-empty activity name is required"): + registry.add_named_activity("", dummy_activity) + + +def test_registry_add_activity_duplicate(): + """Test that adding a duplicate activity raises ValueError.""" + registry = worker._Registry() + + def dummy_activity(ctx, input): + return "done" + + name = "test_activity" + registry.add_named_activity(name, dummy_activity) + + with pytest.raises(ValueError, match=f"A '{name}' activity already exists"): + registry.add_named_activity(name, dummy_activity) + + +def test_registry_get_orchestrator_exists(): + """Test retrieving an existing orchestrator.""" + registry = worker._Registry() + + def dummy_orchestrator(ctx, input): + return "done" + + name = registry.add_orchestrator(dummy_orchestrator) + retrieved = registry.get_orchestrator(name) + + assert retrieved is dummy_orchestrator + + +def test_registry_get_orchestrator_not_exists(): + """Test retrieving a non-existent orchestrator returns None.""" + registry = worker._Registry() + + retrieved = registry.get_orchestrator("non_existent") + + assert retrieved is None + + +def test_registry_get_activity_exists(): + """Test retrieving an existing activity.""" + registry = worker._Registry() + + def dummy_activity(ctx, input): + return "done" + + name = registry.add_activity(dummy_activity) + retrieved = registry.get_activity(name) + + assert retrieved is dummy_activity + + +def test_registry_get_activity_not_exists(): + """Test retrieving a non-existent activity returns None.""" + registry = worker._Registry() + + retrieved = registry.get_activity("non_existent") + + assert retrieved is None + + +def test_registry_add_multiple_orchestrators(): + """Test adding multiple different orchestrators.""" + registry = worker._Registry() + + def orchestrator1(ctx, input): + return "one" + + def orchestrator2(ctx, input): + return "two" + + name1 = registry.add_orchestrator(orchestrator1) + name2 = registry.add_orchestrator(orchestrator2) + + 
assert name1 != name2 + assert registry.get_orchestrator(name1) is orchestrator1 + assert registry.get_orchestrator(name2) is orchestrator2 + + +def test_registry_add_multiple_activities(): + """Test adding multiple different activities.""" + registry = worker._Registry() + + def activity1(ctx, input): + return "one" + + def activity2(ctx, input): + return "two" + + name1 = registry.add_activity(activity1) + name2 = registry.add_activity(activity2) + + assert name1 != name2 + assert registry.get_activity(name1) is activity1 + assert registry.get_activity(name2) is activity2 From 542e5edce72dc005fd6407636e2f937e771ef2e9 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Fri, 14 Nov 2025 08:34:21 -0600 Subject: [PATCH 68/81] missing serialization test from prev git Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- tests/durabletask/test_serialization.py | 74 +++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 tests/durabletask/test_serialization.py diff --git a/tests/durabletask/test_serialization.py b/tests/durabletask/test_serialization.py new file mode 100644 index 0000000..163d300 --- /dev/null +++ b/tests/durabletask/test_serialization.py @@ -0,0 +1,74 @@ +from collections import namedtuple +from dataclasses import dataclass +from types import SimpleNamespace + +from durabletask.internal.shared import AUTO_SERIALIZED, from_json, to_json + + +@dataclass +class SamplePayload: + count: int + name: str + + +def test_to_json_roundtrip_dataclass(): + payload = SamplePayload(count=5, name="widgets") + encoded = to_json(payload) + + assert AUTO_SERIALIZED in encoded + + decoded = from_json(encoded) + assert isinstance(decoded, SimpleNamespace) + assert decoded.count == 5 + assert decoded.name == "widgets" + + +def test_to_json_roundtrip_simplenamespace(): + payload = SimpleNamespace(foo="bar", baz=42) + encoded = to_json(payload) + + assert AUTO_SERIALIZED in encoded + + 
decoded = from_json(encoded) + assert isinstance(decoded, SimpleNamespace) + assert decoded.foo == "bar" + assert decoded.baz == 42 + + +def test_to_json_plain_dict_passthrough(): + payload = {"foo": "bar", "baz": 1} + encoded = to_json(payload) + + assert AUTO_SERIALIZED not in encoded + + decoded = from_json(encoded) + assert isinstance(decoded, dict) + assert decoded == {"foo": "bar", "baz": 1} + + +def test_to_json_namedtuple_roundtrip(): + Point = namedtuple("Point", ["x", "y"]) + payload = Point(3, 4) + encoded = to_json(payload) + + assert AUTO_SERIALIZED in encoded + + decoded = from_json(encoded) + assert isinstance(decoded, SimpleNamespace) + assert decoded.x == 3 + assert decoded.y == 4 + + +def test_to_json_nested_dataclass_collection(): + payload = [ + SamplePayload(count=1, name="first"), + SamplePayload(count=2, name="second"), + ] + encoded = to_json(payload) + + assert encoded.count(AUTO_SERIALIZED) >= 2 + + decoded = from_json(encoded) + assert isinstance(decoded, list) + assert [item.count for item in decoded] == [1, 2] + assert [item.name for item in decoded] == ["first", "second"] From 4f9855dcc387b256e40047c7b0e971a5b9e7d4ee Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Fri, 14 Nov 2025 08:36:57 -0600 Subject: [PATCH 69/81] copyright fix Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- tests/durabletask/test_registry.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/durabletask/test_registry.py b/tests/durabletask/test_registry.py index edb623f..743330c 100644 --- a/tests/durabletask/test_registry.py +++ b/tests/durabletask/test_registry.py @@ -1,5 +1,15 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" """Unit tests for the _Registry class validation logic.""" From 9af47b9e05b93cddc8dfecb0504e30faff174eda Mon Sep 17 00:00:00 2001 From: Patrick Assuied Date: Sun, 16 Nov 2025 10:58:37 -0800 Subject: [PATCH 70/81] Make `compression` optional for grpcio as not all versions support it Signed-off-by: Patrick Assuied --- durabletask/aio/internal/grpc_interceptor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/durabletask/aio/internal/grpc_interceptor.py b/durabletask/aio/internal/grpc_interceptor.py index 4c90ab1..843a95b 100644 --- a/durabletask/aio/internal/grpc_interceptor.py +++ b/durabletask/aio/internal/grpc_interceptor.py @@ -40,13 +40,14 @@ def _intercept_call( metadata = [] metadata.extend(self._metadata) + compression = getattr(client_call_details, "compression", None) return _ClientCallDetails( client_call_details.method, client_call_details.timeout, metadata, client_call_details.credentials, client_call_details.wait_for_ready, - client_call_details.compression, + compression, ) async def intercept_unary_unary(self, continuation, client_call_details, request): From e149057bf262e909978fbc49c90723f5388a1109 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Mon, 17 Nov 2025 15:46:24 -0600 Subject: [PATCH 71/81] feedback license Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- tests/durabletask/test_serialization.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/durabletask/test_serialization.py b/tests/durabletask/test_serialization.py index 
163d300..68f7f14 100644 --- a/tests/durabletask/test_serialization.py +++ b/tests/durabletask/test_serialization.py @@ -1,3 +1,16 @@ +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + from collections import namedtuple from dataclasses import dataclass from types import SimpleNamespace From 930b909321af36bc54fa7b41a3a627a7ddb740d7 Mon Sep 17 00:00:00 2001 From: Filinto Duran <1373693+filintod@users.noreply.github.com> Date: Tue, 18 Nov 2025 10:25:23 -0600 Subject: [PATCH 72/81] Update Makefile Co-authored-by: Albert Callarisa Signed-off-by: Filinto Duran <1373693+filintod@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3a387b0..be0a317 100644 --- a/Makefile +++ b/Makefile @@ -25,4 +25,4 @@ gen-proto: python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. ./durabletask/internal/orchestrator_service.proto rm durabletask/internal/*.proto -.PHONY: init test-unit test-e2e coverage-clean coverage-unit coverage-e2e coverage-all gen-proto install +.PHONY: init test-unit test-e2e coverage-clean coverage-all gen-proto install From b344d9b566adc0443d9ec3327917640e5b449df2 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Mon, 15 Dec 2025 15:58:25 +0100 Subject: [PATCH 73/81] feat: change logger init to use getLogger. 
This allows clients to override log level externally from client code Signed-off-by: Casper Nielsen --- durabletask/internal/shared.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 3adb6b1..09645ed 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -97,12 +97,11 @@ def get_logger( log_handler: Optional[logging.Handler] = None, log_formatter: Optional[logging.Formatter] = None, ) -> logging.Logger: - logger = logging.Logger(f"durabletask-{name_suffix}") + logger = logging.getLogger(f"durabletask-{name_suffix}") # Add a default log handler if none is provided if log_handler is None: log_handler = logging.StreamHandler() - log_handler.setLevel(logging.INFO) logger.handlers.append(log_handler) # Set a default log formatter to our handler if none is provided From 51bfe1cad272c169e5f834c89fc2a9fc56a6b58f Mon Sep 17 00:00:00 2001 From: Samantha Coyle Date: Wed, 14 Jan 2026 16:39:16 -0600 Subject: [PATCH 74/81] fix: correct thread gathering and shutdown logic to prevent app hang Signed-off-by: Samantha Coyle --- durabletask/worker.py | 163 +++++++++++++++++++++++++++++++++++------- 1 file changed, 137 insertions(+), 26 deletions(-) diff --git a/durabletask/worker.py b/durabletask/worker.py index 29d67fc..01f7bb0 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -6,6 +6,7 @@ import logging import os import random +import threading from concurrent.futures import ThreadPoolExecutor from datetime import datetime, timedelta from threading import Event, Thread @@ -26,6 +27,15 @@ TOutput = TypeVar("TOutput") +def _log_all_threads(logger: logging.Logger, context: str = ""): + """Helper function to log all currently active threads for debugging.""" + active_threads = threading.enumerate() + thread_info = [] + for t in active_threads: + thread_info.append(f"name='{t.name}', id={t.ident}, daemon={t.daemon}, alive={t.is_alive()}") + 
logger.debug(f"[THREAD_TRACE] {context} Active threads ({len(active_threads)}): {', '.join(thread_info)}") + + class ConcurrencyOptions: """Configuration options for controlling concurrency of different work item types and the thread pool size. @@ -131,6 +141,7 @@ class ActivityNotRegisteredError(ValueError): pass +# TODO: refactor this to closely match durabletask-go/client/worker_grpc.go instead of this. class TaskHubGrpcWorker: """A gRPC-based worker for processing durable task orchestrations and activities. @@ -236,6 +247,7 @@ def __init__( self._secure_channel = secure_channel self._channel_options = channel_options self._stop_timeout = stop_timeout + self._current_channel: Optional[grpc.Channel] = None # Store channel reference for cleanup # Use provided concurrency options or create default ones self._concurrency_options = ( @@ -288,11 +300,20 @@ def run_loop(): loop.run_until_complete(self._async_run_loop()) self._logger.info(f"Starting gRPC worker that connects to {self._host_address}") - self._runLoop = Thread(target=run_loop) + self._runLoop = Thread(target=run_loop, name="WorkerRunLoop") self._runLoop.start() self._is_running = True + # TODO: refactor this to be more readable and maintainable. async def _async_run_loop(self): + """ + This is the main async loop that runs the worker. 
+ It is responsible for: + - Creating a fresh connection to the sidecar + - Reading work items from the sidecar + - Executing the work items + - Shutting down the worker + """ worker_task = asyncio.create_task(self._async_worker_manager.run()) # Connection state management for retry fix current_channel = None @@ -317,6 +338,8 @@ def create_fresh_connection(): self._interceptors, options=self._channel_options, ) + # Store channel reference for cleanup in stop() + self._current_channel = current_channel current_stub = stubs.TaskHubSidecarServiceStub(current_channel) current_stub.Hello(empty_pb2.Empty()) conn_retry_count = 0 @@ -324,6 +347,7 @@ def create_fresh_connection(): except Exception as e: self._logger.warning(f"Failed to create connection: {e}") current_channel = None + self._current_channel = None current_stub = None raise @@ -354,6 +378,7 @@ def invalidate_connection(): except Exception: pass current_channel = None + self._current_channel = None current_stub = None def should_invalidate_connection(rpc_error): @@ -367,7 +392,9 @@ def should_invalidate_connection(rpc_error): } return error_code in connection_level_errors - while not self._shutdown.is_set(): + while True: + if self._shutdown.is_set(): + break if current_stub is None: try: create_fresh_connection() @@ -399,25 +426,57 @@ def should_invalidate_connection(rpc_error): import queue work_item_queue = queue.Queue() + SHUTDOWN_SENTINEL = None + # NOTE: This is equivalent to the Durabletask Go goroutine calling stream.Recv() in worker_grpc.go StartWorkItemListener() def stream_reader(): try: stream = self._response_stream if stream is None: return - for work_item in stream: # type: ignore - work_item_queue.put(work_item) + while True: + if self._shutdown.is_set(): + break + + try: + work_item = next(stream) + work_item_queue.put(work_item) + except StopIteration: + # stream ended naturally + break + except Exception as e: + work_item_queue.put(e) except Exception as e: work_item_queue.put(e) + finally: 
+ # signal that the stream reader is done (ie matching Go's context cancellation) + try: + work_item_queue.put(SHUTDOWN_SENTINEL) + except Exception as e: + # queue might be closed so ignore this + pass import threading - current_reader_thread = threading.Thread(target=stream_reader, daemon=True) + # Use non-daemon thread (daemon=False) to ensure proper resource cleanup. + # Daemon threads exit immediately when the main program exits, which prevents + # cleanup of gRPC channel resources and OTel interceptors. Non-daemon threads + # block shutdown until they complete, ensuring all resources are properly closed. + current_reader_thread = threading.Thread(target=stream_reader, daemon=False, name="StreamReader") current_reader_thread.start() loop = asyncio.get_running_loop() + + # NOTE: This is a blocking call that will wait for a work item to become available or the shutdown sentinel while not self._shutdown.is_set(): try: - work_item = await loop.run_in_executor(None, work_item_queue.get) + + # Use timeout to allow shutdown check (mimicing Go's select with ctx.Done()) + work_item = await loop.run_in_executor( + None, + lambda: work_item_queue.get(timeout=0.1)) + # Essentially check for ctx.Done() in Go + if work_item == SHUTDOWN_SENTINEL: + break if isinstance(work_item, Exception): raise work_item request_type = work_item.WhichOneof("request") @@ -440,6 +499,8 @@ def stream_reader(): pass else: self._logger.warning(f"Unexpected work item type: {request_type}") + except queue.Empty: + continue except grpc.RpcError: raise # let it be captured/parsed by outer except and avoid noisy log except Exception as e: @@ -486,7 +547,18 @@ def stream_reader(): invalidate_connection() self._logger.info("No longer listening for work items") self._async_worker_manager.shutdown() - await worker_task + + # Cancel worker_task to ensure shutdown completes even if tasks are still running + worker_task.cancel() + try: + # Wait for cancellation to complete, with a timeout to prevent 
indefinite waiting + await asyncio.wait_for(worker_task, timeout=5.0) + except asyncio.CancelledError: + self._logger.debug("Worker task cancelled during shutdown") + except asyncio.TimeoutError: + self._logger.warning("Worker task did not complete within timeout during shutdown") + except Exception as e: + self._logger.warning(f"Error while waiting for worker task shutdown: {e}") def stop(self): """Stops the worker and waits for any pending work items to complete.""" @@ -497,8 +569,25 @@ def stop(self): self._shutdown.set() if self._response_stream is not None: self._response_stream.cancel() + # Explicitly close the gRPC channel to ensure OTel interceptors and other resources are cleaned up + if self._current_channel is not None: + try: + self._current_channel.close() + except Exception as e: + self._logger.exception(f"Error closing gRPC channel: {e}") + finally: + self._current_channel = None + if self._runLoop is not None: self._runLoop.join(timeout=self._stop_timeout) + if self._runLoop.is_alive(): + self._logger.warning( + f"Worker thread did not complete within {self._stop_timeout}s timeout. " + "Some resources may not be fully cleaned up." 
+ ) + else: + self._logger.debug("Worker thread completed successfully") + self._async_worker_manager.shutdown() self._logger.info("Worker shutdown completed") self._is_running = False @@ -1506,26 +1595,43 @@ async def _consume_queue(self, queue: asyncio.Queue, semaphore: asyncio.Semaphor # List to track running tasks running_tasks: set[asyncio.Task] = set() - while True: - # Clean up completed tasks - done_tasks = {task for task in running_tasks if task.done()} - running_tasks -= done_tasks + try: + while True: + # Clean up completed tasks + done_tasks = {task for task in running_tasks if task.done()} + running_tasks -= done_tasks - # Exit if shutdown is set and the queue is empty and no tasks are running - if self._shutdown and queue.empty() and not running_tasks: - break + # Exit if shutdown is set and the queue is empty and no tasks are running + if self._shutdown and queue.empty() and not running_tasks: + break - try: - work = await asyncio.wait_for(queue.get(), timeout=1.0) - except asyncio.TimeoutError: - continue - - func, args, kwargs = work - # Create a concurrent task for processing - task = asyncio.create_task( - self._process_work_item(semaphore, queue, func, args, kwargs) - ) - running_tasks.add(task) + try: + work = await asyncio.wait_for(queue.get(), timeout=1.0) + except asyncio.TimeoutError: + # Check for cancellation during timeout and exit while loop if shutting down + if self._shutdown: + break + continue # otherwise wait for work item to become available and loop again + except asyncio.CancelledError: + # Propagate cancellation + raise + + func, args, kwargs = work + # Create a concurrent task for processing + task = asyncio.create_task( + self._process_work_item(semaphore, queue, func, args, kwargs) + ) + running_tasks.add(task) + # handle the cancellation bubbled up from the loop + except asyncio.CancelledError: + # Cancel any remaining running tasks + for task in running_tasks: + if not task.done(): + task.cancel() + # Wait briefly for 
tasks to cancel, but don't block indefinitely + if running_tasks: + await asyncio.gather(*running_tasks, return_exceptions=True) + raise async def _process_work_item( self, semaphore: asyncio.Semaphore, queue: asyncio.Queue, func, args, kwargs @@ -1548,7 +1654,8 @@ async def _run_func(self, func, *args, **kwargs): and getattr(self.thread_pool, "_shutdown", False) ): return None - return await loop.run_in_executor(self.thread_pool, lambda: func(*args, **kwargs)) + result = await loop.run_in_executor(self.thread_pool, lambda: func(*args, **kwargs)) + return result def submit_activity(self, func, *args, **kwargs): work_item = (func, args, kwargs) @@ -1570,6 +1677,10 @@ def submit_orchestration(self, func, *args, **kwargs): def shutdown(self): self._shutdown = True + # Shutdown thread pool. Since we've already cancelled worker_task and set _shutdown=True, + # no new work should be submitted and existing work should complete quickly. + # ThreadPoolExecutor.shutdown(wait=True) doesn't support a timeout, but with proper + # cancellation in place, threads should exit promptly, otherwise this will hang and block shutdown for the application. 
self.thread_pool.shutdown(wait=True) def reset_for_new_run(self): From 1ff4521bb92d5d625892bca8b174f08d06a9ed73 Mon Sep 17 00:00:00 2001 From: Samantha Coyle Date: Wed, 14 Jan 2026 18:01:42 -0600 Subject: [PATCH 75/81] fix: add graceful shutdown to response_stream thread Signed-off-by: Samantha Coyle --- durabletask/worker.py | 110 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 95 insertions(+), 15 deletions(-) diff --git a/durabletask/worker.py b/durabletask/worker.py index 01f7bb0..9e176c0 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -363,12 +363,7 @@ def invalidate_connection(): # Wait for the reader thread to finish if current_reader_thread is not None: - try: - current_reader_thread.join(timeout=2) - if current_reader_thread.is_alive(): - self._logger.warning("Stream reader thread did not shut down gracefully") - except Exception: - pass + current_reader_thread.join(timeout=1) current_reader_thread = None # Close the channel @@ -434,20 +429,46 @@ def stream_reader(): stream = self._response_stream if stream is None: return + + # Use next() to allow shutdown check between items + # This matches Go's pattern: check ctx.Err() after each stream.Recv() while True: if self._shutdown.is_set(): break - + try: + # NOTE: next(stream) blocks until gRPC returns the next work item or cancels the stream. + # There is no way to interrupt this blocking call in Python gRPC. When shutdown is + # initiated, the channel closure propagates to this call, which can take several seconds. + # The thread will exit once gRPC raises grpc.RpcError with StatusCode.CANCELLED. 
work_item = next(stream) + # Check shutdown again after getting item (in case shutdown happened during next()) + if self._shutdown.is_set(): + break work_item_queue.put(work_item) except StopIteration: # stream ended naturally break - except Exception as e: - work_item_queue.put(e) + except grpc.RpcError as rpc_error: + # Check if this is due to shutdown/cancellation + if self._shutdown.is_set() or rpc_error.code() == grpc.StatusCode.CANCELLED: + self._logger.debug(f"Stream reader: stream cancelled during shutdown (code={rpc_error.code()})") + break + # Other RPC errors - put in queue for async loop to handle + self._logger.warning(f"Stream reader: RPC error (code={rpc_error.code()}): {rpc_error}") + break + except Exception as stream_error: + # Check if this is due to shutdown + if self._shutdown.is_set(): + self._logger.info(f"Stream reader: exception during shutdown: {type(stream_error).__name__}: {stream_error}") + break + # Other stream errors - put in queue for async loop to handle + self._logger.warning(f"Stream reader: unexpected error: {stream_error}") + break + except Exception as e: - work_item_queue.put(e) + if not self._shutdown.is_set(): + work_item_queue.put(e) finally: # signal that the stream reader is done (ie matching Go's context cancellation) try: @@ -477,6 +498,13 @@ def stream_reader(): # Essentially check for ctx.Done() in Go if work_item == SHUTDOWN_SENTINEL: break + + if self._shutdown.is_set(): + self._logger.debug("Shutdown detected, ignoring work item") + break + if self._async_worker_manager._shutdown or loop.is_closed(): + self._logger.debug("Async worker manager shut down or loop closed, exiting work item processing") + break if isinstance(work_item, Exception): raise work_item request_type = work_item.WhichOneof("request") @@ -504,11 +532,25 @@ def stream_reader(): except grpc.RpcError: raise # let it be captured/parsed by outer except and avoid noisy log except Exception as e: - self._logger.warning(f"Error in work item stream: 
{e}") + if self._async_worker_manager._shutdown or loop.is_closed(): + break + invalidate_connection() raise e current_reader_thread.join(timeout=1) - self._logger.info("Work item stream ended normally") + + if self._shutdown.is_set(): + self._logger.info(f"Disconnected from {self._host_address}") + else: + self._logger.info("Work item stream ended normally") + # When stream ends (SHUTDOWN_SENTINEL received), always break outer loop + # The stream reader has exited, so we should exit too, not reconnect + # This matches Go SDK behavior where stream ending causes the listener to exit + break except grpc.RpcError as rpc_error: + # Check shutdown first - if shutting down, exit immediately + if self._shutdown.is_set(): + self._logger.debug("Shutdown detected during RPC error handling, exiting") + break should_invalidate = should_invalidate_connection(rpc_error) if should_invalidate: invalidate_connection() @@ -539,11 +581,41 @@ def stream_reader(): self._logger.warning( f"Application-level gRPC error ({error_code}): {rpc_error}" ) - self._shutdown.wait(1) + except RuntimeError as ex: + # RuntimeError often indicates asyncio loop issues (e.g., "cannot schedule new futures after shutdown") + # Check shutdown state first + if self._shutdown.is_set(): + self._logger.debug(f"Shutdown detected during RuntimeError handling, exiting: {ex}") + break + # Check if async worker manager is shut down or loop is closed + try: + loop = asyncio.get_running_loop() + if self._async_worker_manager._shutdown or loop.is_closed(): + self._logger.debug(f"Async worker manager shut down or loop closed, exiting: {ex}") + break + except RuntimeError: + # No event loop running, treat as shutdown + self._logger.debug(f"No event loop running, exiting: {ex}") + break + # If we can't get the loop or it's in a bad state, and we got a RuntimeError, + # it's likely shutdown-related. Break to prevent infinite retries. 
+ break except Exception as ex: + if self._shutdown.is_set(): + self._logger.debug(f"Shutdown detected during exception handling, exiting: {ex}") + break + # Check if async worker manager is shut down or loop is closed + try: + loop = asyncio.get_running_loop() + if self._async_worker_manager._shutdown or loop.is_closed(): + self._logger.debug(f"Async worker manager shut down or loop closed, exiting: {ex}") + break + except RuntimeError: + # No event loop running, treat as shutdown + self._logger.debug(f"No event loop running, exiting: {ex}") + break invalidate_connection() self._logger.warning(f"Unexpected error: {ex}") - self._shutdown.wait(1) invalidate_connection() self._logger.info("No longer listening for work items") self._async_worker_manager.shutdown() @@ -566,9 +638,9 @@ def stop(self): return self._logger.info("Stopping gRPC worker...") - self._shutdown.set() if self._response_stream is not None: self._response_stream.cancel() + self._shutdown.set() # Explicitly close the gRPC channel to ensure OTel interceptors and other resources are cleaned up if self._current_channel is not None: try: @@ -1517,8 +1589,12 @@ def __init__(self, concurrency_options: ConcurrencyOptions): def _ensure_queues_for_current_loop(self): """Ensure queues are bound to the current event loop.""" + if self._shutdown: + return try: current_loop = asyncio.get_running_loop() + if current_loop.is_closed(): + return except RuntimeError: # No event loop running, can't create queues return @@ -1658,6 +1734,8 @@ async def _run_func(self, func, *args, **kwargs): return result def submit_activity(self, func, *args, **kwargs): + if self._shutdown: + return work_item = (func, args, kwargs) self._ensure_queues_for_current_loop() if self.activity_queue is not None: @@ -1667,6 +1745,8 @@ def submit_activity(self, func, *args, **kwargs): self._pending_activity_work.append(work_item) def submit_orchestration(self, func, *args, **kwargs): + if self._shutdown: + return work_item = (func, args, 
kwargs) self._ensure_queues_for_current_loop() if self.orchestration_queue is not None: From 1a30848bc9d63daed9476a9c7e930efe915990d1 Mon Sep 17 00:00:00 2001 From: Samantha Coyle Date: Thu, 15 Jan 2026 08:46:05 -0600 Subject: [PATCH 76/81] fix: address feedback Signed-off-by: Samantha Coyle --- durabletask/worker.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/durabletask/worker.py b/durabletask/worker.py index 9e176c0..2c4482d 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -460,11 +460,10 @@ def stream_reader(): except Exception as stream_error: # Check if this is due to shutdown if self._shutdown.is_set(): - self._logger.info(f"Stream reader: exception during shutdown: {type(stream_error).__name__}: {stream_error}") - break + self._logger.debug(f"Stream reader: exception during shutdown: {type(stream_error).__name__}: {stream_error}") # Other stream errors - put in queue for async loop to handle self._logger.warning(f"Stream reader: unexpected error: {stream_error}") - break + raise except Exception as e: if not self._shutdown.is_set(): @@ -499,9 +498,6 @@ def stream_reader(): if work_item == SHUTDOWN_SENTINEL: break - if self._shutdown.is_set(): - self._logger.debug("Shutdown detected, ignoring work item") - break if self._async_worker_manager._shutdown or loop.is_closed(): self._logger.debug("Async worker manager shut down or loop closed, exiting work item processing") break @@ -618,7 +614,6 @@ def stream_reader(): self._logger.warning(f"Unexpected error: {ex}") invalidate_connection() self._logger.info("No longer listening for work items") - self._async_worker_manager.shutdown() # Cancel worker_task to ensure shutdown completes even if tasks are still running worker_task.cancel() @@ -725,8 +720,9 @@ def _execute_orchestrator( try: stub.CompleteOrchestratorTask(res) - except grpc.RpcError as rpc_error: # type: ignore - self._handle_grpc_execution_error(rpc_error, "orchestrator") + except 
grpc.RpcError: + # except grpc.RpcError as rpc_error: # type: ignore + raise # self._handle_grpc_execution_error(rpc_error, "orchestrator") except Exception as ex: self._logger.exception( f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}" @@ -758,8 +754,10 @@ def _execute_activity( try: stub.CompleteActivityTask(res) - except grpc.RpcError as rpc_error: # type: ignore - self._handle_grpc_execution_error(rpc_error, "activity") + # except grpc.RpcError as rpc_error: # type: ignore + # self._handle_grpc_execution_error(rpc_error, "activity") + except grpc.RpcError: + raise except Exception as ex: self._logger.exception( f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}" From ffb21592a39b3b9f68179a1acfac2c1017a49d0d Mon Sep 17 00:00:00 2001 From: Samantha Coyle Date: Thu, 15 Jan 2026 08:50:35 -0600 Subject: [PATCH 77/81] style: put back a func i tried commenting out Signed-off-by: Samantha Coyle --- durabletask/worker.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/durabletask/worker.py b/durabletask/worker.py index 2c4482d..4b012cd 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -659,6 +659,7 @@ def stop(self): self._logger.info("Worker shutdown completed") self._is_running = False + # TODO: This should be removed in the future as we do handle grpc errs def _handle_grpc_execution_error(self, rpc_error: grpc.RpcError, request_type: str): """Handle a gRPC execution error during shutdown or benign condition.""" # During shutdown or if the instance was terminated, the channel may be close @@ -720,9 +721,8 @@ def _execute_orchestrator( try: stub.CompleteOrchestratorTask(res) - except grpc.RpcError: - # except grpc.RpcError as rpc_error: # type: ignore - raise # self._handle_grpc_execution_error(rpc_error, "orchestrator") + except grpc.RpcError as rpc_error: # type: ignore + self._handle_grpc_execution_error(rpc_error, 
"orchestrator") except Exception as ex: self._logger.exception( f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}" @@ -754,10 +754,8 @@ def _execute_activity( try: stub.CompleteActivityTask(res) - # except grpc.RpcError as rpc_error: # type: ignore - # self._handle_grpc_execution_error(rpc_error, "activity") - except grpc.RpcError: - raise + except grpc.RpcError as rpc_error: # type: ignore + self._handle_grpc_execution_error(rpc_error, "activity") except Exception as ex: self._logger.exception( f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}" From 2b06ef2b05e725150444dfd18dfe08b3b83e319f Mon Sep 17 00:00:00 2001 From: Samantha Coyle Date: Thu, 15 Jan 2026 08:53:42 -0600 Subject: [PATCH 78/81] style: appease linter Signed-off-by: Samantha Coyle --- durabletask/worker.py | 78 ++++++++++++++++++++++++++++--------------- 1 file changed, 52 insertions(+), 26 deletions(-) diff --git a/durabletask/worker.py b/durabletask/worker.py index 4b012cd..bf78b73 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -32,8 +32,12 @@ def _log_all_threads(logger: logging.Logger, context: str = ""): active_threads = threading.enumerate() thread_info = [] for t in active_threads: - thread_info.append(f"name='{t.name}', id={t.ident}, daemon={t.daemon}, alive={t.is_alive()}") - logger.debug(f"[THREAD_TRACE] {context} Active threads ({len(active_threads)}): {', '.join(thread_info)}") + thread_info.append( + f"name='{t.name}', id={t.ident}, daemon={t.daemon}, alive={t.is_alive()}" + ) + logger.debug( + f"[THREAD_TRACE] {context} Active threads ({len(active_threads)}): {', '.join(thread_info)}" + ) class ConcurrencyOptions: @@ -429,13 +433,13 @@ def stream_reader(): stream = self._response_stream if stream is None: return - + # Use next() to allow shutdown check between items # This matches Go's pattern: check ctx.Err() after each stream.Recv() while True: if 
self._shutdown.is_set(): break - + try: # NOTE: next(stream) blocks until gRPC returns the next work item or cancels the stream. # There is no way to interrupt this blocking call in Python gRPC. When shutdown is @@ -451,20 +455,31 @@ def stream_reader(): break except grpc.RpcError as rpc_error: # Check if this is due to shutdown/cancellation - if self._shutdown.is_set() or rpc_error.code() == grpc.StatusCode.CANCELLED: - self._logger.debug(f"Stream reader: stream cancelled during shutdown (code={rpc_error.code()})") + if ( + self._shutdown.is_set() + or rpc_error.code() == grpc.StatusCode.CANCELLED + ): + self._logger.debug( + f"Stream reader: stream cancelled during shutdown (code={rpc_error.code()})" + ) break # Other RPC errors - put in queue for async loop to handle - self._logger.warning(f"Stream reader: RPC error (code={rpc_error.code()}): {rpc_error}") + self._logger.warning( + f"Stream reader: RPC error (code={rpc_error.code()}): {rpc_error}" + ) break except Exception as stream_error: # Check if this is due to shutdown if self._shutdown.is_set(): - self._logger.debug(f"Stream reader: exception during shutdown: {type(stream_error).__name__}: {stream_error}") + self._logger.debug( + f"Stream reader: exception during shutdown: {type(stream_error).__name__}: {stream_error}" + ) # Other stream errors - put in queue for async loop to handle - self._logger.warning(f"Stream reader: unexpected error: {stream_error}") + self._logger.warning( + f"Stream reader: unexpected error: {stream_error}" + ) raise - + except Exception as e: if not self._shutdown.is_set(): work_item_queue.put(e) @@ -472,7 +487,7 @@ def stream_reader(): # signal that the stream reader is done (ie matching Go's context cancellation) try: work_item_queue.put(SHUTDOWN_SENTINEL) - except Exception as e: + except Exception: # queue might be closed so ignore this pass @@ -482,24 +497,27 @@ def stream_reader(): # Daemon threads exit immediately when the main program exits, which prevents # cleanup of 
gRPC channel resources and OTel interceptors. Non-daemon threads # block shutdown until they complete, ensuring all resources are properly closed. - current_reader_thread = threading.Thread(target=stream_reader, daemon=False, name="StreamReader") + current_reader_thread = threading.Thread( + target=stream_reader, daemon=False, name="StreamReader" + ) current_reader_thread.start() loop = asyncio.get_running_loop() # NOTE: This is a blocking call that will wait for a work item to become available or the shutdown sentinel while not self._shutdown.is_set(): try: - # Use timeout to allow shutdown check (mimicing Go's select with ctx.Done()) work_item = await loop.run_in_executor( - None, - lambda: work_item_queue.get(timeout=0.1)) + None, lambda: work_item_queue.get(timeout=0.1) + ) # Essentially check for ctx.Done() in Go if work_item == SHUTDOWN_SENTINEL: break - + if self._async_worker_manager._shutdown or loop.is_closed(): - self._logger.debug("Async worker manager shut down or loop closed, exiting work item processing") + self._logger.debug( + "Async worker manager shut down or loop closed, exiting work item processing" + ) break if isinstance(work_item, Exception): raise work_item @@ -533,7 +551,7 @@ def stream_reader(): invalidate_connection() raise e current_reader_thread.join(timeout=1) - + if self._shutdown.is_set(): self._logger.info(f"Disconnected from {self._host_address}") else: @@ -581,13 +599,17 @@ def stream_reader(): # RuntimeError often indicates asyncio loop issues (e.g., "cannot schedule new futures after shutdown") # Check shutdown state first if self._shutdown.is_set(): - self._logger.debug(f"Shutdown detected during RuntimeError handling, exiting: {ex}") + self._logger.debug( + f"Shutdown detected during RuntimeError handling, exiting: {ex}" + ) break # Check if async worker manager is shut down or loop is closed try: loop = asyncio.get_running_loop() if self._async_worker_manager._shutdown or loop.is_closed(): - self._logger.debug(f"Async worker 
manager shut down or loop closed, exiting: {ex}") + self._logger.debug( + f"Async worker manager shut down or loop closed, exiting: {ex}" + ) break except RuntimeError: # No event loop running, treat as shutdown @@ -598,13 +620,17 @@ def stream_reader(): break except Exception as ex: if self._shutdown.is_set(): - self._logger.debug(f"Shutdown detected during exception handling, exiting: {ex}") + self._logger.debug( + f"Shutdown detected during exception handling, exiting: {ex}" + ) break # Check if async worker manager is shut down or loop is closed try: loop = asyncio.get_running_loop() if self._async_worker_manager._shutdown or loop.is_closed(): - self._logger.debug(f"Async worker manager shut down or loop closed, exiting: {ex}") + self._logger.debug( + f"Async worker manager shut down or loop closed, exiting: {ex}" + ) break except RuntimeError: # No event loop running, treat as shutdown @@ -614,7 +640,7 @@ def stream_reader(): self._logger.warning(f"Unexpected error: {ex}") invalidate_connection() self._logger.info("No longer listening for work items") - + # Cancel worker_task to ensure shutdown completes even if tasks are still running worker_task.cancel() try: @@ -644,7 +670,7 @@ def stop(self): self._logger.exception(f"Error closing gRPC channel: {e}") finally: self._current_channel = None - + if self._runLoop is not None: self._runLoop.join(timeout=self._stop_timeout) if self._runLoop.is_alive(): @@ -654,7 +680,7 @@ def stop(self): ) else: self._logger.debug("Worker thread completed successfully") - + self._async_worker_manager.shutdown() self._logger.info("Worker shutdown completed") self._is_running = False @@ -1683,7 +1709,7 @@ async def _consume_queue(self, queue: asyncio.Queue, semaphore: asyncio.Semaphor # Check for cancellation during timeout and exit while loop if shutting down if self._shutdown: break - continue # otherwise wait for work item to become available and loop again + continue # otherwise wait for work item to become available and loop 
again except asyncio.CancelledError: # Propagate cancellation raise From 6e096bb5dd5638c6cde9078c1bdd12133d704ae1 Mon Sep 17 00:00:00 2001 From: Samantha Coyle Date: Thu, 15 Jan 2026 10:09:28 -0600 Subject: [PATCH 79/81] fix(test): append to pending list regardless of shutdown or not Signed-off-by: Samantha Coyle --- durabletask/worker.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/durabletask/worker.py b/durabletask/worker.py index bf78b73..5ad1ccc 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -431,8 +431,6 @@ def should_invalidate_connection(rpc_error): def stream_reader(): try: stream = self._response_stream - if stream is None: - return # Use next() to allow shutdown check between items # This matches Go's pattern: check ctx.Err() after each stream.Recv() @@ -1611,8 +1609,6 @@ def __init__(self, concurrency_options: ConcurrencyOptions): def _ensure_queues_for_current_loop(self): """Ensure queues are bound to the current event loop.""" - if self._shutdown: - return try: current_loop = asyncio.get_running_loop() if current_loop.is_closed(): @@ -1756,8 +1752,6 @@ async def _run_func(self, func, *args, **kwargs): return result def submit_activity(self, func, *args, **kwargs): - if self._shutdown: - return work_item = (func, args, kwargs) self._ensure_queues_for_current_loop() if self.activity_queue is not None: @@ -1767,8 +1761,6 @@ def submit_activity(self, func, *args, **kwargs): self._pending_activity_work.append(work_item) def submit_orchestration(self, func, *args, **kwargs): - if self._shutdown: - return work_item = (func, args, kwargs) self._ensure_queues_for_current_loop() if self.orchestration_queue is not None: From ccebcc4c77890c1805569b504718d3d9c4161c1c Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Fri, 16 Jan 2026 15:34:32 +0100 Subject: [PATCH 80/81] Workflow versioning Signed-off-by: Albert Callarisa --- durabletask/client.py | 1 + durabletask/internal/PROTO_SOURCE_COMMIT_HASH | 2 +- 
durabletask/internal/helpers.py | 9 + .../internal/orchestrator_service_pb2.py | 472 +++++++++--------- .../internal/orchestrator_service_pb2.pyi | 117 ++++- .../internal/orchestrator_service_pb2_grpc.py | 86 ++++ durabletask/task.py | 16 + durabletask/worker.py | 122 ++++- tests/durabletask/test_registry.py | 49 +- 9 files changed, 614 insertions(+), 260 deletions(-) diff --git a/durabletask/client.py b/durabletask/client.py index e3d391f..06faf85 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -33,6 +33,7 @@ class OrchestrationStatus(Enum): PENDING = pb.ORCHESTRATION_STATUS_PENDING SUSPENDED = pb.ORCHESTRATION_STATUS_SUSPENDED CANCELED = pb.ORCHESTRATION_STATUS_CANCELED + STALLED = pb.ORCHESTRATION_STATUS_STALLED def __str__(self): return helpers.get_orchestration_status_str(self.value) diff --git a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH index 80179d7..1abbaf1 100644 --- a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH +++ b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH @@ -1 +1 @@ -4b86756497d875b97f9a91051781b5711c1e4fa6 +889781bbe90e6ec84ebe169978c4f2fd0df74ff0 diff --git a/durabletask/internal/helpers.py b/durabletask/internal/helpers.py index 8b67219..3f04728 100644 --- a/durabletask/internal/helpers.py +++ b/durabletask/internal/helpers.py @@ -188,6 +188,15 @@ def new_complete_orchestration_action( ) +def new_orchestrator_version_not_available_action( + id: int, +) -> pb.OrchestratorAction: + return pb.OrchestratorAction( + id=id, + orchestratorVersionNotAvailable=pb.OrchestratorVersionNotAvailableAction(), + ) + + def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction: timestamp = timestamp_pb2.Timestamp() timestamp.FromDatetime(fire_at) diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py index ed91507..9315572 100644 --- a/durabletask/internal/orchestrator_service_pb2.py +++ 
b/durabletask/internal/orchestrator_service_pb2.py @@ -28,7 +28,7 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"K\n\nTaskRouter\x12\x13\n\x0bsourceAppID\x18\x01 \x01(\t\x12\x18\n\x0btargetAppID\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_targetAppID\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\x02\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\x12\x17\n\x0ftaskExecutionId\x18\x07 \x01(\t\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xdd\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x12\n\x05\x61ppID\x18\x05 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_appID\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xe5\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\t \x03(\x0b\x32 .ExecutionStartedEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xc2\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\x12\x17\n\x0ftaskExecutionId\x18\x05 \x01(\t\"t\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0ftaskExecutionId\x18\x03 \x01(\t\"p\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0ftaskExecutionId\x18\x03 \x01(\t\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"[\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_name\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdc\x01\n\x1c\x45ntityOperationSignaledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xcb\x02\n\x1a\x45ntityOperationCalledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10parentInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x11parentExecutionId\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x90\x01\n\x18\x45ntityLockRequestedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x0f\n\x07lockSet\x18\x02 \x03(\t\x12\x10\n\x08position\x18\x03 \x01(\x05\x12\x36\n\x10parentInstanceId\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"`\n\x1d\x45ntityOperationCompletedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\\\n\x1a\x45ntityOperationFailedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xa2\x01\n\x15\x45ntityUnlockSentEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x36\n\x10parentInstanceId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x16\x45ntityLockGrantedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\"\xd9\x0c\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 
\x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x12@\n\x17\x65ntityOperationSignaled\x18\x17 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x18 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x42\n\x18\x65ntityOperationCompleted\x18\x19 
\x01(\x0b\x32\x1e.EntityOperationCompletedEventH\x00\x12<\n\x15\x65ntityOperationFailed\x18\x1a \x01(\x0b\x32\x1b.EntityOperationFailedEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x1b \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x34\n\x11\x65ntityLockGranted\x18\x1c \x01(\x0b\x32\x17.EntityLockGrantedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x1d \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x12 \n\x06router\x18\x1e \x01(\x0b\x32\x0b.TaskRouterH\x01\x88\x01\x01\x42\x0b\n\teventTypeB\t\n\x07_router\"\xc4\x01\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12 \n\x06router\x18\x04 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x12\x17\n\x0ftaskExecutionId\x18\x05 \x01(\tB\t\n\x07_router\"\xc9\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12 \n\x06router\x18\x05 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x42\t\n\x07_router\"[\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_name\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 
\x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\x9c\x02\n\x17SendEntityMessageAction\x12@\n\x17\x65ntityOperationSignaled\x18\x01 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x02 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x03 \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x04 \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x42\x13\n\x11\x45ntityMessageType\"\xde\x03\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x12\x35\n\x11sendEntityMessage\x18\x08 \x01(\x0b\x32\x18.SendEntityMessageActionH\x00\x12 \n\x06router\x18\t \x01(\x0b\x32\x0b.TaskRouterH\x01\x88\x01\x01\x42\x18\n\x16orchestratorActionTypeB\t\n\x07_router\"\xa9\x02\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\x12 \n\x18requiresHistoryStreaming\x18\x06 \x01(\x08\x12 \n\x06router\x18\x07 
\x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x42\t\n\x07_router\"\xd6\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\x12\x37\n\x12numEventsProcessed\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\"\xce\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x12)\n\x12parentTraceContext\x18\t \x01(\x0b\x32\r.TraceContext\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xfe\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x04tags\x18\x0f \x03(\x0b\x32\x1d.OrchestrationState.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 
\x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"f\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\x12.\n\nisComplete\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 
\x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 
\x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xfa\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\x12&\n\x0eoperationInfos\x18\x06 \x03(\x0b\x32\x0e.OperationInfo\"\x95\x01\n\rEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x13\n\x0b\x65xecutionId\x18\x02 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12(\n\x11operationRequests\x18\x04 \x03(\x0b\x32\r.HistoryEvent\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"W\n\rOperationInfo\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x33\n\x13responseDestination\x18\x02 \x01(\x0b\x32\x16.OrchestrationInstance\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 
\x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"5\n\x1a\x41\x62\x61ndonActivityTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\x1d\n\x1b\x41\x62\x61ndonActivityTaskResponse\":\n\x1f\x41\x62\x61ndonOrchestrationTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\"\n AbandonOrchestrationTaskResponse\"3\n\x18\x41\x62\x61ndonEntityTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\x1b\n\x19\x41\x62\x61ndonEntityTaskResponse\"\xb9\x01\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\x12$\n\x1cmaxConcurrentEntityWorkItems\x18\x03 \x01(\x05\x12\'\n\x0c\x63\x61pabilities\x18\n \x03(\x0e\x32\x11.WorkerCapability\"\x8c\x02\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12)\n\x0f\x65ntityRequestV2\x18\x05 \x01(\x0b\x32\x0e.EntityRequestH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing\"\x84\x01\n\x1cStreamInstanceHistoryRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66orWorkItemProcessing\x18\x03 
\x01(\x08\"-\n\x0cHistoryChunk\x12\x1d\n\x06\x65vents\x18\x01 \x03(\x0b\x32\r.HistoryEvent\"\xbd\x01\n\x1dRerunWorkflowFromEventRequest\x12\x18\n\x10sourceInstanceID\x18\x01 \x01(\t\x12\x0f\n\x07\x65ventID\x18\x02 \x01(\r\x12\x1a\n\rnewInstanceID\x18\x03 \x01(\tH\x00\x88\x01\x01\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x16\n\x0eoverwriteInput\x18\x05 \x01(\x08\x42\x10\n\x0e_newInstanceID\"7\n\x1eRerunWorkflowFromEventResponse\x12\x15\n\rnewInstanceID\x18\x01 \x01(\t*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02*^\n\x10WorkerCapability\x12!\n\x1dWORKER_CAPABILITY_UNSPECIFIED\x10\x00\x12\'\n#WORKER_CAPABILITY_HISTORY_STREAMING\x10\x01\x32\xb6\x0e\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRe
quest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12G\n\x15StreamInstanceHistory\x12\x1d.StreamInstanceHistoryRequest\x1a\r.HistoryChunk0\x01\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponse\x12X\n\x1b\x41\x62\x61ndonTaskActivityWorkItem\x12\x1b.AbandonActivityTaskRequest\x1a\x1c.AbandonActivityTaskResponse\x12\x66\n\x1f\x41\x62\x61ndonTaskOrchestratorWorkItem\x12 .AbandonOrchestrationTaskRequest\x1a!.AbandonOrchestrationTaskResponse\x12R\n\x19\x41\x62\x61ndonTaskEntityWorkItem\x12\x19.AbandonEntityTaskRequest\x1a\x1a.AbandonEntityTaskResponse\x12Y\n\x16RerunWorkflowFromEvent\x12\x1e.RerunWorkflowFromEventRequest\x1a\x1f.RerunWorkflowFromEventResponseBV\n+io.dapr.durabletask.implementation.protobufZ\x0b/api/protos\xaa\x02\x19\x44\x61pr.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"K\n\nTaskRouter\x12\x13\n\x0bsourceAppID\x18\x01 \x01(\t\x12\x18\n\x0btargetAppID\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_targetAppID\"C\n\x14OrchestrationVersion\x12\x0f\n\x07patches\x18\x01 \x03(\t\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_name\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\x02\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\x12\x17\n\x0ftaskExecutionId\x18\x07 \x01(\t\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xdd\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x12\n\x05\x61ppID\x18\x05 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_appID\"-\n\x17RerunParentInstanceInfo\x12\x12\n\ninstanceID\x18\x01 \x01(\t\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xe5\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\t \x03(\x0b\x32 .ExecutionStartedEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\x9e\x02\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\x12\x17\n\x0ftaskExecutionId\x18\x05 
\x01(\t\x12>\n\x17rerunParentInstanceInfo\x18\x06 \x01(\x0b\x32\x18.RerunParentInstanceInfoH\x00\x88\x01\x01\x42\x1a\n\x18_rerunParentInstanceInfo\"t\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0ftaskExecutionId\x18\x03 \x01(\t\"p\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0ftaskExecutionId\x18\x03 \x01(\t\"\xab\x02\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\x12>\n\x17rerunParentInstanceInfo\x18\x06 \x01(\x0b\x32\x18.RerunParentInstanceInfoH\x00\x88\x01\x01\x42\x1a\n\x18_rerunParentInstanceInfo\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb7\x01\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x12>\n\x17rerunParentInstanceInfo\x18\x03 \x01(\x0b\x32\x18.RerunParentInstanceInfoH\x01\x88\x01\x01\x42\x07\n\x05_nameB\x1a\n\x18_rerunParentInstanceInfo\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"S\n\x18OrchestratorStartedEvent\x12+\n\x07version\x18\x01 \x01(\x0b\x32\x15.OrchestrationVersionH\x00\x88\x01\x01\x42\n\n\x08_version\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 
\x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"a\n\x15\x45xecutionStalledEvent\x12\x1e\n\x06reason\x18\x01 \x01(\x0e\x32\x0e.StalledReason\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_description\"\xdc\x01\n\x1c\x45ntityOperationSignaledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xcb\x02\n\x1a\x45ntityOperationCalledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10parentInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x11parentExecutionId\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x90\x01\n\x18\x45ntityLockRequestedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x0f\n\x07lockSet\x18\x02 \x03(\t\x12\x10\n\x08position\x18\x03 
\x01(\x05\x12\x36\n\x10parentInstanceId\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"`\n\x1d\x45ntityOperationCompletedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\\\n\x1a\x45ntityOperationFailedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xa2\x01\n\x15\x45ntityUnlockSentEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x36\n\x10parentInstanceId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x16\x45ntityLockGrantedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\"\x8d\r\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f 
\x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x12@\n\x17\x65ntityOperationSignaled\x18\x17 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x18 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x42\n\x18\x65ntityOperationCompleted\x18\x19 \x01(\x0b\x32\x1e.EntityOperationCompletedEventH\x00\x12<\n\x15\x65ntityOperationFailed\x18\x1a \x01(\x0b\x32\x1b.EntityOperationFailedEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x1b \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x34\n\x11\x65ntityLockGranted\x18\x1c \x01(\x0b\x32\x17.EntityLockGrantedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x1d \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x12\x32\n\x10\x65xecutionStalled\x18\x1f \x01(\x0b\x32\x16.ExecutionStalledEventH\x00\x12 \n\x06router\x18\x1e \x01(\x0b\x32\x0b.TaskRouterH\x01\x88\x01\x01\x42\x0b\n\teventTypeB\t\n\x07_router\"\xc4\x01\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12 \n\x06router\x18\x04 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x12\x17\n\x0ftaskExecutionId\x18\x05 \x01(\tB\t\n\x07_router\"\xc9\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12 \n\x06router\x18\x05 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x42\t\n\x07_router\"[\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_name\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\x9c\x02\n\x17SendEntityMessageAction\x12@\n\x17\x65ntityOperationSignaled\x18\x01 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x02 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x03 \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x04 \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x42\x13\n\x11\x45ntityMessageType\"\'\n%OrchestratorVersionNotAvailableAction\"\xb1\x04\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 
\x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x12\x35\n\x11sendEntityMessage\x18\x08 \x01(\x0b\x32\x18.SendEntityMessageActionH\x00\x12Q\n\x1forchestratorVersionNotAvailable\x18\n \x01(\x0b\x32&.OrchestratorVersionNotAvailableActionH\x00\x12 \n\x06router\x18\t \x01(\x0b\x32\x0b.TaskRouterH\x01\x88\x01\x01\x42\x18\n\x16orchestratorActionTypeB\t\n\x07_router\"\xa9\x02\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\x12 \n\x18requiresHistoryStreaming\x18\x06 \x01(\x08\x12 \n\x06router\x18\x07 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x42\t\n\x07_router\"\x8f\x02\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\x12\x37\n\x12numEventsProcessed\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12+\n\x07version\x18\x06 \x01(\x0b\x32\x15.OrchestrationVersionH\x00\x88\x01\x01\x42\n\n\x08_version\"\xce\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 
\x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x12)\n\x12parentTraceContext\x18\t \x01(\x0b\x32\r.TraceContext\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xfe\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x04tags\x18\x0f \x03(\x0b\x32\x1d.OrchestrationState.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9e\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 
\x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x12\x12\n\x05\x66orce\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\t\n\x07requestB\x08\n\x06_force\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"f\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\x12.\n\nisComplete\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 
\x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xfa\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\x12&\n\x0eoperationInfos\x18\x06 \x03(\x0b\x32\x0e.OperationInfo\"\x95\x01\n\rEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x13\n\x0b\x65xecutionId\x18\x02 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12(\n\x11operationRequests\x18\x04 \x03(\x0b\x32\r.HistoryEvent\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"W\n\rOperationInfo\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x33\n\x13responseDestination\x18\x02 \x01(\x0b\x32\x16.OrchestrationInstance\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"5\n\x1a\x41\x62\x61ndonActivityTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\x1d\n\x1b\x41\x62\x61ndonActivityTaskResponse\":\n\x1f\x41\x62\x61ndonOrchestrationTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\"\n 
AbandonOrchestrationTaskResponse\"3\n\x18\x41\x62\x61ndonEntityTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\x1b\n\x19\x41\x62\x61ndonEntityTaskResponse\"\xb9\x01\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\x12$\n\x1cmaxConcurrentEntityWorkItems\x18\x03 \x01(\x05\x12\'\n\x0c\x63\x61pabilities\x18\n \x03(\x0e\x32\x11.WorkerCapability\"\x8c\x02\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12)\n\x0f\x65ntityRequestV2\x18\x05 \x01(\x0b\x32\x0e.EntityRequestH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing\"\x84\x01\n\x1cStreamInstanceHistoryRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66orWorkItemProcessing\x18\x03 \x01(\x08\"-\n\x0cHistoryChunk\x12\x1d\n\x06\x65vents\x18\x01 \x03(\x0b\x32\r.HistoryEvent\"\x85\x02\n\x1dRerunWorkflowFromEventRequest\x12\x18\n\x10sourceInstanceID\x18\x01 \x01(\t\x12\x0f\n\x07\x65ventID\x18\x02 \x01(\r\x12\x1a\n\rnewInstanceID\x18\x03 \x01(\tH\x00\x88\x01\x01\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x16\n\x0eoverwriteInput\x18\x05 \x01(\x08\x12\'\n\x1anewChildWorkflowInstanceID\x18\x06 \x01(\tH\x01\x88\x01\x01\x42\x10\n\x0e_newInstanceIDB\x1d\n\x1b_newChildWorkflowInstanceID\"7\n\x1eRerunWorkflowFromEventResponse\x12\x15\n\rnewInstanceID\x18\x01 \x01(\t\"r\n\x16ListInstanceIDsRequest\x12\x1e\n\x11\x63ontinuationToken\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08pageSize\x18\x02 
\x01(\rH\x01\x88\x01\x01\x42\x14\n\x12_continuationTokenB\x0b\n\t_pageSize\"d\n\x17ListInstanceIDsResponse\x12\x13\n\x0binstanceIds\x18\x01 \x03(\t\x12\x1e\n\x11\x63ontinuationToken\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x14\n\x12_continuationToken\"/\n\x19GetInstanceHistoryRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\";\n\x1aGetInstanceHistoryResponse\x12\x1d\n\x06\x65vents\x18\x01 \x03(\x0b\x32\r.HistoryEvent*>\n\rStalledReason\x12\x12\n\x0ePATCH_MISMATCH\x10\x00\x12\x19\n\x15VERSION_NOT_AVAILABLE\x10\x01*\xd7\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07\x12 \n\x1cORCHESTRATION_STATUS_STALLED\x10\x08*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02*^\n\x10WorkerCapability\x12!\n\x1dWORKER_CAPABILITY_UNSPECIFIED\x10\x00\x12\'\n#WORKER_CAPABILITY_HISTORY_STREAMING\x10\x01\x32\xcb\x0f\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.Suspe
ndResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12G\n\x15StreamInstanceHistory\x12\x1d.StreamInstanceHistoryRequest\x1a\r.HistoryChunk0\x01\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponse\x12X\n\x1b\x41\x62\x61ndonTaskActivityWorkItem\x12\x1b.AbandonActivityTaskRequest\x1a\x1c.AbandonActivityTaskResponse\x12\x66\n\x1f\x41\x62\x61ndonTaskOrchestratorWorkItem\x12 .AbandonOrchestrationTaskRequest\x1a!.AbandonOrchestrationTaskResponse\x12R\n\x19\x41\x62\x61ndonTaskEntityWorkItem\x12\x19.AbandonEntityTaskRequest\x1a\x1a.AbandonEntityTaskResponse\x12Y\n\x16RerunWorkflowFromEvent\x12\x1e.RerunWorkflowFromEventRequest\x1a\x1f.RerunWorkflowFromEventResponse\x12\x44\n\x0fListInstanceIDs\x12\x17.ListInstanceIDsRequest\x1a\x18.ListInstanceIDsResponse\x12M\n\x12GetInstanceHistory\x12\x1a.GetInstanceHistoryRequest\x1a\x1b.GetInstanceHistoryResponseBV\n+io.dapr.durabletask.implementation.protobufZ\x0b/api/protos\xaa\x02\x19\x44\x61pr.DurableTask.Protobufb\x06proto3') _globals = globals() 
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -44,232 +44,250 @@ _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._loaded_options = None _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=16113 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=16422 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=16424 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=16489 - _globals['_WORKERCAPABILITY']._serialized_start=16491 - _globals['_WORKERCAPABILITY']._serialized_end=16585 + _globals['_STALLEDREASON']._serialized_start=17324 + _globals['_STALLEDREASON']._serialized_end=17386 + _globals['_ORCHESTRATIONSTATUS']._serialized_start=17389 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=17732 + _globals['_CREATEORCHESTRATIONACTION']._serialized_start=17734 + _globals['_CREATEORCHESTRATIONACTION']._serialized_end=17799 + _globals['_WORKERCAPABILITY']._serialized_start=17801 + _globals['_WORKERCAPABILITY']._serialized_end=17895 _globals['_TASKROUTER']._serialized_start=177 _globals['_TASKROUTER']._serialized_end=252 - _globals['_ORCHESTRATIONINSTANCE']._serialized_start=254 - _globals['_ORCHESTRATIONINSTANCE']._serialized_end=348 - _globals['_ACTIVITYREQUEST']._serialized_start=351 - _globals['_ACTIVITYREQUEST']._serialized_end=613 - _globals['_ACTIVITYRESPONSE']._serialized_start=616 - _globals['_ACTIVITYRESPONSE']._serialized_end=786 - _globals['_TASKFAILUREDETAILS']._serialized_start=789 - _globals['_TASKFAILUREDETAILS']._serialized_end=967 - _globals['_PARENTINSTANCEINFO']._serialized_start=970 - _globals['_PARENTINSTANCEINFO']._serialized_end=1191 - _globals['_TRACECONTEXT']._serialized_start=1193 - _globals['_TRACECONTEXT']._serialized_end=1298 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1301 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1786 - 
_globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_start=1743 - _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_end=1786 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1789 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1956 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1958 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=2046 - _globals['_TASKSCHEDULEDEVENT']._serialized_start=2049 - _globals['_TASKSCHEDULEDEVENT']._serialized_end=2243 - _globals['_TASKCOMPLETEDEVENT']._serialized_start=2245 - _globals['_TASKCOMPLETEDEVENT']._serialized_end=2361 - _globals['_TASKFAILEDEVENT']._serialized_start=2363 - _globals['_TASKFAILEDEVENT']._serialized_end=2475 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2478 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2685 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2687 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2798 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2800 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2907 - _globals['_TIMERCREATEDEVENT']._serialized_start=2909 - _globals['_TIMERCREATEDEVENT']._serialized_end=3000 - _globals['_TIMERFIREDEVENT']._serialized_start=3002 - _globals['_TIMERFIREDEVENT']._serialized_end=3080 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=3082 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=3108 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=3110 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=3138 - _globals['_EVENTSENTEVENT']._serialized_start=3140 - _globals['_EVENTSENTEVENT']._serialized_end=3235 - _globals['_EVENTRAISEDEVENT']._serialized_start=3237 - _globals['_EVENTRAISEDEVENT']._serialized_end=3314 - _globals['_GENERICEVENT']._serialized_start=3316 - _globals['_GENERICEVENT']._serialized_end=3374 - _globals['_HISTORYSTATEEVENT']._serialized_start=3376 - 
_globals['_HISTORYSTATEEVENT']._serialized_end=3444 - _globals['_CONTINUEASNEWEVENT']._serialized_start=3446 - _globals['_CONTINUEASNEWEVENT']._serialized_end=3511 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3513 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3583 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3585 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3653 - _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_start=3656 - _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_end=3876 - _globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_start=3879 - _globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_end=4210 - _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_start=4213 - _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_end=4357 - _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_start=4359 - _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_end=4455 - _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_start=4457 - _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_end=4549 - _globals['_ENTITYUNLOCKSENTEVENT']._serialized_start=4552 - _globals['_ENTITYUNLOCKSENTEVENT']._serialized_end=4714 - _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_start=4716 - _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_end=4767 - _globals['_HISTORYEVENT']._serialized_start=4770 - _globals['_HISTORYEVENT']._serialized_end=6395 - _globals['_SCHEDULETASKACTION']._serialized_start=6398 - _globals['_SCHEDULETASKACTION']._serialized_end=6594 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=6597 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=6798 - _globals['_CREATETIMERACTION']._serialized_start=6800 - _globals['_CREATETIMERACTION']._serialized_end=6891 - _globals['_SENDEVENTACTION']._serialized_start=6893 - _globals['_SENDEVENTACTION']._serialized_end=7010 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=7013 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=7321 - 
_globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=7323 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=7436 - _globals['_SENDENTITYMESSAGEACTION']._serialized_start=7439 - _globals['_SENDENTITYMESSAGEACTION']._serialized_end=7723 - _globals['_ORCHESTRATORACTION']._serialized_start=7726 - _globals['_ORCHESTRATORACTION']._serialized_end=8204 - _globals['_ORCHESTRATORREQUEST']._serialized_start=8207 - _globals['_ORCHESTRATORREQUEST']._serialized_end=8504 - _globals['_ORCHESTRATORRESPONSE']._serialized_start=8507 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=8721 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=8724 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=9186 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=1743 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=1786 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=9188 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=9307 - _globals['_CREATEINSTANCERESPONSE']._serialized_start=9309 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=9353 - _globals['_GETINSTANCEREQUEST']._serialized_start=9355 - _globals['_GETINSTANCEREQUEST']._serialized_end=9424 - _globals['_GETINSTANCERESPONSE']._serialized_start=9426 - _globals['_GETINSTANCERESPONSE']._serialized_end=9512 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=9514 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=9603 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=9605 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=9629 - _globals['_ORCHESTRATIONSTATE']._serialized_start=9632 - _globals['_ORCHESTRATIONSTATE']._serialized_end=10398 - _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_start=1743 - _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_end=1786 - _globals['_RAISEEVENTREQUEST']._serialized_start=10400 - _globals['_RAISEEVENTREQUEST']._serialized_end=10498 - _globals['_RAISEEVENTRESPONSE']._serialized_start=10500 - 
_globals['_RAISEEVENTRESPONSE']._serialized_end=10520 - _globals['_TERMINATEREQUEST']._serialized_start=10522 - _globals['_TERMINATEREQUEST']._serialized_end=10625 - _globals['_TERMINATERESPONSE']._serialized_start=10627 - _globals['_TERMINATERESPONSE']._serialized_end=10646 - _globals['_SUSPENDREQUEST']._serialized_start=10648 - _globals['_SUSPENDREQUEST']._serialized_end=10730 - _globals['_SUSPENDRESPONSE']._serialized_start=10732 - _globals['_SUSPENDRESPONSE']._serialized_end=10749 - _globals['_RESUMEREQUEST']._serialized_start=10751 - _globals['_RESUMEREQUEST']._serialized_end=10832 - _globals['_RESUMERESPONSE']._serialized_start=10834 - _globals['_RESUMERESPONSE']._serialized_end=10850 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=10852 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=10906 - _globals['_INSTANCEQUERY']._serialized_start=10909 - _globals['_INSTANCEQUERY']._serialized_end=11295 - _globals['_QUERYINSTANCESRESPONSE']._serialized_start=11298 - _globals['_QUERYINSTANCESRESPONSE']._serialized_end=11428 - _globals['_PURGEINSTANCESREQUEST']._serialized_start=11431 - _globals['_PURGEINSTANCESREQUEST']._serialized_end=11559 - _globals['_PURGEINSTANCEFILTER']._serialized_start=11562 - _globals['_PURGEINSTANCEFILTER']._serialized_end=11732 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=11734 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=11836 - _globals['_CREATETASKHUBREQUEST']._serialized_start=11838 - _globals['_CREATETASKHUBREQUEST']._serialized_end=11886 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=11888 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=11911 - _globals['_DELETETASKHUBREQUEST']._serialized_start=11913 - _globals['_DELETETASKHUBREQUEST']._serialized_end=11935 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=11937 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=11960 - _globals['_SIGNALENTITYREQUEST']._serialized_start=11963 - _globals['_SIGNALENTITYREQUEST']._serialized_end=12133 - 
_globals['_SIGNALENTITYRESPONSE']._serialized_start=12135 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=12157 - _globals['_GETENTITYREQUEST']._serialized_start=12159 - _globals['_GETENTITYREQUEST']._serialized_end=12219 - _globals['_GETENTITYRESPONSE']._serialized_start=12221 - _globals['_GETENTITYRESPONSE']._serialized_end=12289 - _globals['_ENTITYQUERY']._serialized_start=12292 - _globals['_ENTITYQUERY']._serialized_end=12623 - _globals['_QUERYENTITIESREQUEST']._serialized_start=12625 - _globals['_QUERYENTITIESREQUEST']._serialized_end=12676 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=12678 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=12793 - _globals['_ENTITYMETADATA']._serialized_start=12796 - _globals['_ENTITYMETADATA']._serialized_end=13015 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=13018 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=13161 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=13164 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=13310 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=13312 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=13405 - _globals['_ENTITYBATCHREQUEST']._serialized_start=13408 - _globals['_ENTITYBATCHREQUEST']._serialized_end=13538 - _globals['_ENTITYBATCHRESULT']._serialized_start=13541 - _globals['_ENTITYBATCHRESULT']._serialized_end=13791 - _globals['_ENTITYREQUEST']._serialized_start=13794 - _globals['_ENTITYREQUEST']._serialized_end=13943 - _globals['_OPERATIONREQUEST']._serialized_start=13945 - _globals['_OPERATIONREQUEST']._serialized_end=14046 - _globals['_OPERATIONRESULT']._serialized_start=14048 - _globals['_OPERATIONRESULT']._serialized_end=14167 - _globals['_OPERATIONINFO']._serialized_start=14169 - _globals['_OPERATIONINFO']._serialized_end=14256 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=14258 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=14328 - 
_globals['_OPERATIONRESULTFAILURE']._serialized_start=14330 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=14399 - _globals['_OPERATIONACTION']._serialized_start=14402 - _globals['_OPERATIONACTION']._serialized_end=14558 - _globals['_SENDSIGNALACTION']._serialized_start=14561 - _globals['_SENDSIGNALACTION']._serialized_end=14709 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=14712 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=14918 - _globals['_ABANDONACTIVITYTASKREQUEST']._serialized_start=14920 - _globals['_ABANDONACTIVITYTASKREQUEST']._serialized_end=14973 - _globals['_ABANDONACTIVITYTASKRESPONSE']._serialized_start=14975 - _globals['_ABANDONACTIVITYTASKRESPONSE']._serialized_end=15004 - _globals['_ABANDONORCHESTRATIONTASKREQUEST']._serialized_start=15006 - _globals['_ABANDONORCHESTRATIONTASKREQUEST']._serialized_end=15064 - _globals['_ABANDONORCHESTRATIONTASKRESPONSE']._serialized_start=15066 - _globals['_ABANDONORCHESTRATIONTASKRESPONSE']._serialized_end=15100 - _globals['_ABANDONENTITYTASKREQUEST']._serialized_start=15102 - _globals['_ABANDONENTITYTASKREQUEST']._serialized_end=15153 - _globals['_ABANDONENTITYTASKRESPONSE']._serialized_start=15155 - _globals['_ABANDONENTITYTASKRESPONSE']._serialized_end=15182 - _globals['_GETWORKITEMSREQUEST']._serialized_start=15185 - _globals['_GETWORKITEMSREQUEST']._serialized_end=15370 - _globals['_WORKITEM']._serialized_start=15373 - _globals['_WORKITEM']._serialized_end=15641 - _globals['_COMPLETETASKRESPONSE']._serialized_start=15643 - _globals['_COMPLETETASKRESPONSE']._serialized_end=15665 - _globals['_HEALTHPING']._serialized_start=15667 - _globals['_HEALTHPING']._serialized_end=15679 - _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_start=15682 - _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_end=15814 - _globals['_HISTORYCHUNK']._serialized_start=15816 - _globals['_HISTORYCHUNK']._serialized_end=15861 - _globals['_RERUNWORKFLOWFROMEVENTREQUEST']._serialized_start=15864 
- _globals['_RERUNWORKFLOWFROMEVENTREQUEST']._serialized_end=16053 - _globals['_RERUNWORKFLOWFROMEVENTRESPONSE']._serialized_start=16055 - _globals['_RERUNWORKFLOWFROMEVENTRESPONSE']._serialized_end=16110 - _globals['_TASKHUBSIDECARSERVICE']._serialized_start=16588 - _globals['_TASKHUBSIDECARSERVICE']._serialized_end=18434 + _globals['_ORCHESTRATIONVERSION']._serialized_start=254 + _globals['_ORCHESTRATIONVERSION']._serialized_end=321 + _globals['_ORCHESTRATIONINSTANCE']._serialized_start=323 + _globals['_ORCHESTRATIONINSTANCE']._serialized_end=417 + _globals['_ACTIVITYREQUEST']._serialized_start=420 + _globals['_ACTIVITYREQUEST']._serialized_end=682 + _globals['_ACTIVITYRESPONSE']._serialized_start=685 + _globals['_ACTIVITYRESPONSE']._serialized_end=855 + _globals['_TASKFAILUREDETAILS']._serialized_start=858 + _globals['_TASKFAILUREDETAILS']._serialized_end=1036 + _globals['_PARENTINSTANCEINFO']._serialized_start=1039 + _globals['_PARENTINSTANCEINFO']._serialized_end=1260 + _globals['_RERUNPARENTINSTANCEINFO']._serialized_start=1262 + _globals['_RERUNPARENTINSTANCEINFO']._serialized_end=1307 + _globals['_TRACECONTEXT']._serialized_start=1309 + _globals['_TRACECONTEXT']._serialized_end=1414 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1417 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1902 + _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_start=1859 + _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_end=1902 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1905 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=2072 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=2074 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=2162 + _globals['_TASKSCHEDULEDEVENT']._serialized_start=2165 + _globals['_TASKSCHEDULEDEVENT']._serialized_end=2451 + _globals['_TASKCOMPLETEDEVENT']._serialized_start=2453 + _globals['_TASKCOMPLETEDEVENT']._serialized_end=2569 + _globals['_TASKFAILEDEVENT']._serialized_start=2571 + 
_globals['_TASKFAILEDEVENT']._serialized_end=2683 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2686 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2985 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2987 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=3098 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=3100 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=3207 + _globals['_TIMERCREATEDEVENT']._serialized_start=3210 + _globals['_TIMERCREATEDEVENT']._serialized_end=3393 + _globals['_TIMERFIREDEVENT']._serialized_start=3395 + _globals['_TIMERFIREDEVENT']._serialized_end=3473 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=3475 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=3558 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=3560 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=3588 + _globals['_EVENTSENTEVENT']._serialized_start=3590 + _globals['_EVENTSENTEVENT']._serialized_end=3685 + _globals['_EVENTRAISEDEVENT']._serialized_start=3687 + _globals['_EVENTRAISEDEVENT']._serialized_end=3764 + _globals['_GENERICEVENT']._serialized_start=3766 + _globals['_GENERICEVENT']._serialized_end=3824 + _globals['_HISTORYSTATEEVENT']._serialized_start=3826 + _globals['_HISTORYSTATEEVENT']._serialized_end=3894 + _globals['_CONTINUEASNEWEVENT']._serialized_start=3896 + _globals['_CONTINUEASNEWEVENT']._serialized_end=3961 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3963 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=4033 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=4035 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=4103 + _globals['_EXECUTIONSTALLEDEVENT']._serialized_start=4105 + _globals['_EXECUTIONSTALLEDEVENT']._serialized_end=4202 + _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_start=4205 + _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_end=4425 + 
_globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_start=4428 + _globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_end=4759 + _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_start=4762 + _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_end=4906 + _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_start=4908 + _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_end=5004 + _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_start=5006 + _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_end=5098 + _globals['_ENTITYUNLOCKSENTEVENT']._serialized_start=5101 + _globals['_ENTITYUNLOCKSENTEVENT']._serialized_end=5263 + _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_start=5265 + _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_end=5316 + _globals['_HISTORYEVENT']._serialized_start=5319 + _globals['_HISTORYEVENT']._serialized_end=6996 + _globals['_SCHEDULETASKACTION']._serialized_start=6999 + _globals['_SCHEDULETASKACTION']._serialized_end=7195 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=7198 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=7399 + _globals['_CREATETIMERACTION']._serialized_start=7401 + _globals['_CREATETIMERACTION']._serialized_end=7492 + _globals['_SENDEVENTACTION']._serialized_start=7494 + _globals['_SENDEVENTACTION']._serialized_end=7611 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=7614 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=7922 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=7924 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=8037 + _globals['_SENDENTITYMESSAGEACTION']._serialized_start=8040 + _globals['_SENDENTITYMESSAGEACTION']._serialized_end=8324 + _globals['_ORCHESTRATORVERSIONNOTAVAILABLEACTION']._serialized_start=8326 + _globals['_ORCHESTRATORVERSIONNOTAVAILABLEACTION']._serialized_end=8365 + _globals['_ORCHESTRATORACTION']._serialized_start=8368 + _globals['_ORCHESTRATORACTION']._serialized_end=8929 + 
_globals['_ORCHESTRATORREQUEST']._serialized_start=8932 + _globals['_ORCHESTRATORREQUEST']._serialized_end=9229 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=9232 + _globals['_ORCHESTRATORRESPONSE']._serialized_end=9503 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=9506 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=9968 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=1859 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=1902 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=9970 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=10089 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=10091 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=10135 + _globals['_GETINSTANCEREQUEST']._serialized_start=10137 + _globals['_GETINSTANCEREQUEST']._serialized_end=10206 + _globals['_GETINSTANCERESPONSE']._serialized_start=10208 + _globals['_GETINSTANCERESPONSE']._serialized_end=10294 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=10296 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=10385 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=10387 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=10411 + _globals['_ORCHESTRATIONSTATE']._serialized_start=10414 + _globals['_ORCHESTRATIONSTATE']._serialized_end=11180 + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_start=1859 + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_end=1902 + _globals['_RAISEEVENTREQUEST']._serialized_start=11182 + _globals['_RAISEEVENTREQUEST']._serialized_end=11280 + _globals['_RAISEEVENTRESPONSE']._serialized_start=11282 + _globals['_RAISEEVENTRESPONSE']._serialized_end=11302 + _globals['_TERMINATEREQUEST']._serialized_start=11304 + _globals['_TERMINATEREQUEST']._serialized_end=11407 + _globals['_TERMINATERESPONSE']._serialized_start=11409 + _globals['_TERMINATERESPONSE']._serialized_end=11428 + _globals['_SUSPENDREQUEST']._serialized_start=11430 + 
_globals['_SUSPENDREQUEST']._serialized_end=11512 + _globals['_SUSPENDRESPONSE']._serialized_start=11514 + _globals['_SUSPENDRESPONSE']._serialized_end=11531 + _globals['_RESUMEREQUEST']._serialized_start=11533 + _globals['_RESUMEREQUEST']._serialized_end=11614 + _globals['_RESUMERESPONSE']._serialized_start=11616 + _globals['_RESUMERESPONSE']._serialized_end=11632 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=11634 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=11688 + _globals['_INSTANCEQUERY']._serialized_start=11691 + _globals['_INSTANCEQUERY']._serialized_end=12077 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=12080 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=12210 + _globals['_PURGEINSTANCESREQUEST']._serialized_start=12213 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=12371 + _globals['_PURGEINSTANCEFILTER']._serialized_start=12374 + _globals['_PURGEINSTANCEFILTER']._serialized_end=12544 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=12546 + _globals['_PURGEINSTANCESRESPONSE']._serialized_end=12648 + _globals['_CREATETASKHUBREQUEST']._serialized_start=12650 + _globals['_CREATETASKHUBREQUEST']._serialized_end=12698 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=12700 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=12723 + _globals['_DELETETASKHUBREQUEST']._serialized_start=12725 + _globals['_DELETETASKHUBREQUEST']._serialized_end=12747 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=12749 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=12772 + _globals['_SIGNALENTITYREQUEST']._serialized_start=12775 + _globals['_SIGNALENTITYREQUEST']._serialized_end=12945 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=12947 + _globals['_SIGNALENTITYRESPONSE']._serialized_end=12969 + _globals['_GETENTITYREQUEST']._serialized_start=12971 + _globals['_GETENTITYREQUEST']._serialized_end=13031 + _globals['_GETENTITYRESPONSE']._serialized_start=13033 + 
_globals['_GETENTITYRESPONSE']._serialized_end=13101 + _globals['_ENTITYQUERY']._serialized_start=13104 + _globals['_ENTITYQUERY']._serialized_end=13435 + _globals['_QUERYENTITIESREQUEST']._serialized_start=13437 + _globals['_QUERYENTITIESREQUEST']._serialized_end=13488 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=13490 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=13605 + _globals['_ENTITYMETADATA']._serialized_start=13608 + _globals['_ENTITYMETADATA']._serialized_end=13827 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=13830 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=13973 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=13976 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=14122 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=14124 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=14217 + _globals['_ENTITYBATCHREQUEST']._serialized_start=14220 + _globals['_ENTITYBATCHREQUEST']._serialized_end=14350 + _globals['_ENTITYBATCHRESULT']._serialized_start=14353 + _globals['_ENTITYBATCHRESULT']._serialized_end=14603 + _globals['_ENTITYREQUEST']._serialized_start=14606 + _globals['_ENTITYREQUEST']._serialized_end=14755 + _globals['_OPERATIONREQUEST']._serialized_start=14757 + _globals['_OPERATIONREQUEST']._serialized_end=14858 + _globals['_OPERATIONRESULT']._serialized_start=14860 + _globals['_OPERATIONRESULT']._serialized_end=14979 + _globals['_OPERATIONINFO']._serialized_start=14981 + _globals['_OPERATIONINFO']._serialized_end=15068 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=15070 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=15140 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=15142 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=15211 + _globals['_OPERATIONACTION']._serialized_start=15214 + _globals['_OPERATIONACTION']._serialized_end=15370 + _globals['_SENDSIGNALACTION']._serialized_start=15373 + 
_globals['_SENDSIGNALACTION']._serialized_end=15521 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=15524 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=15730 + _globals['_ABANDONACTIVITYTASKREQUEST']._serialized_start=15732 + _globals['_ABANDONACTIVITYTASKREQUEST']._serialized_end=15785 + _globals['_ABANDONACTIVITYTASKRESPONSE']._serialized_start=15787 + _globals['_ABANDONACTIVITYTASKRESPONSE']._serialized_end=15816 + _globals['_ABANDONORCHESTRATIONTASKREQUEST']._serialized_start=15818 + _globals['_ABANDONORCHESTRATIONTASKREQUEST']._serialized_end=15876 + _globals['_ABANDONORCHESTRATIONTASKRESPONSE']._serialized_start=15878 + _globals['_ABANDONORCHESTRATIONTASKRESPONSE']._serialized_end=15912 + _globals['_ABANDONENTITYTASKREQUEST']._serialized_start=15914 + _globals['_ABANDONENTITYTASKREQUEST']._serialized_end=15965 + _globals['_ABANDONENTITYTASKRESPONSE']._serialized_start=15967 + _globals['_ABANDONENTITYTASKRESPONSE']._serialized_end=15994 + _globals['_GETWORKITEMSREQUEST']._serialized_start=15997 + _globals['_GETWORKITEMSREQUEST']._serialized_end=16182 + _globals['_WORKITEM']._serialized_start=16185 + _globals['_WORKITEM']._serialized_end=16453 + _globals['_COMPLETETASKRESPONSE']._serialized_start=16455 + _globals['_COMPLETETASKRESPONSE']._serialized_end=16477 + _globals['_HEALTHPING']._serialized_start=16479 + _globals['_HEALTHPING']._serialized_end=16491 + _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_start=16494 + _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_end=16626 + _globals['_HISTORYCHUNK']._serialized_start=16628 + _globals['_HISTORYCHUNK']._serialized_end=16673 + _globals['_RERUNWORKFLOWFROMEVENTREQUEST']._serialized_start=16676 + _globals['_RERUNWORKFLOWFROMEVENTREQUEST']._serialized_end=16937 + _globals['_RERUNWORKFLOWFROMEVENTRESPONSE']._serialized_start=16939 + _globals['_RERUNWORKFLOWFROMEVENTRESPONSE']._serialized_end=16994 + _globals['_LISTINSTANCEIDSREQUEST']._serialized_start=16996 + 
_globals['_LISTINSTANCEIDSREQUEST']._serialized_end=17110 + _globals['_LISTINSTANCEIDSRESPONSE']._serialized_start=17112 + _globals['_LISTINSTANCEIDSRESPONSE']._serialized_end=17212 + _globals['_GETINSTANCEHISTORYREQUEST']._serialized_start=17214 + _globals['_GETINSTANCEHISTORYREQUEST']._serialized_end=17261 + _globals['_GETINSTANCEHISTORYRESPONSE']._serialized_start=17263 + _globals['_GETINSTANCEHISTORYRESPONSE']._serialized_end=17322 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=17898 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=19893 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2.pyi b/durabletask/internal/orchestrator_service_pb2.pyi index 102e183..53a3e48 100644 --- a/durabletask/internal/orchestrator_service_pb2.pyi +++ b/durabletask/internal/orchestrator_service_pb2.pyi @@ -13,6 +13,11 @@ from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor +class StalledReason(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + PATCH_MISMATCH: _ClassVar[StalledReason] + VERSION_NOT_AVAILABLE: _ClassVar[StalledReason] + class OrchestrationStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () ORCHESTRATION_STATUS_RUNNING: _ClassVar[OrchestrationStatus] @@ -23,6 +28,7 @@ class OrchestrationStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): ORCHESTRATION_STATUS_TERMINATED: _ClassVar[OrchestrationStatus] ORCHESTRATION_STATUS_PENDING: _ClassVar[OrchestrationStatus] ORCHESTRATION_STATUS_SUSPENDED: _ClassVar[OrchestrationStatus] + ORCHESTRATION_STATUS_STALLED: _ClassVar[OrchestrationStatus] class CreateOrchestrationAction(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () @@ -34,6 +40,8 @@ class WorkerCapability(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () WORKER_CAPABILITY_UNSPECIFIED: _ClassVar[WorkerCapability] WORKER_CAPABILITY_HISTORY_STREAMING: 
_ClassVar[WorkerCapability] +PATCH_MISMATCH: StalledReason +VERSION_NOT_AVAILABLE: StalledReason ORCHESTRATION_STATUS_RUNNING: OrchestrationStatus ORCHESTRATION_STATUS_COMPLETED: OrchestrationStatus ORCHESTRATION_STATUS_CONTINUED_AS_NEW: OrchestrationStatus @@ -42,6 +50,7 @@ ORCHESTRATION_STATUS_CANCELED: OrchestrationStatus ORCHESTRATION_STATUS_TERMINATED: OrchestrationStatus ORCHESTRATION_STATUS_PENDING: OrchestrationStatus ORCHESTRATION_STATUS_SUSPENDED: OrchestrationStatus +ORCHESTRATION_STATUS_STALLED: OrchestrationStatus ERROR: CreateOrchestrationAction IGNORE: CreateOrchestrationAction TERMINATE: CreateOrchestrationAction @@ -56,6 +65,14 @@ class TaskRouter(_message.Message): targetAppID: str def __init__(self, sourceAppID: _Optional[str] = ..., targetAppID: _Optional[str] = ...) -> None: ... +class OrchestrationVersion(_message.Message): + __slots__ = ("patches", "name") + PATCHES_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + patches: _containers.RepeatedScalarFieldContainer[str] + name: str + def __init__(self, patches: _Optional[_Iterable[str]] = ..., name: _Optional[str] = ...) -> None: ... + class OrchestrationInstance(_message.Message): __slots__ = ("instanceId", "executionId") INSTANCEID_FIELD_NUMBER: _ClassVar[int] @@ -124,6 +141,12 @@ class ParentInstanceInfo(_message.Message): appID: str def __init__(self, taskScheduledId: _Optional[int] = ..., name: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., appID: _Optional[str] = ...) -> None: ... +class RerunParentInstanceInfo(_message.Message): + __slots__ = ("instanceID",) + INSTANCEID_FIELD_NUMBER: _ClassVar[int] + instanceID: str + def __init__(self, instanceID: _Optional[str] = ...) -> None: ... 
+ class TraceContext(_message.Message): __slots__ = ("traceParent", "spanID", "traceState") TRACEPARENT_FIELD_NUMBER: _ClassVar[int] @@ -182,18 +205,20 @@ class ExecutionTerminatedEvent(_message.Message): def __init__(self, input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., recurse: bool = ...) -> None: ... class TaskScheduledEvent(_message.Message): - __slots__ = ("name", "version", "input", "parentTraceContext", "taskExecutionId") + __slots__ = ("name", "version", "input", "parentTraceContext", "taskExecutionId", "rerunParentInstanceInfo") NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] + RERUNPARENTINSTANCEINFO_FIELD_NUMBER: _ClassVar[int] name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue parentTraceContext: TraceContext taskExecutionId: str - def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... + rerunParentInstanceInfo: RerunParentInstanceInfo + def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., taskExecutionId: _Optional[str] = ..., rerunParentInstanceInfo: _Optional[_Union[RerunParentInstanceInfo, _Mapping]] = ...) -> None: ... 
class TaskCompletedEvent(_message.Message): __slots__ = ("taskScheduledId", "result", "taskExecutionId") @@ -216,18 +241,20 @@ class TaskFailedEvent(_message.Message): def __init__(self, taskScheduledId: _Optional[int] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... class SubOrchestrationInstanceCreatedEvent(_message.Message): - __slots__ = ("instanceId", "name", "version", "input", "parentTraceContext") + __slots__ = ("instanceId", "name", "version", "input", "parentTraceContext", "rerunParentInstanceInfo") INSTANCEID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] + RERUNPARENTINSTANCEINFO_FIELD_NUMBER: _ClassVar[int] instanceId: str name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue parentTraceContext: TraceContext - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... + rerunParentInstanceInfo: RerunParentInstanceInfo + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., rerunParentInstanceInfo: _Optional[_Union[RerunParentInstanceInfo, _Mapping]] = ...) -> None: ... 
class SubOrchestrationInstanceCompletedEvent(_message.Message): __slots__ = ("taskScheduledId", "result") @@ -246,12 +273,14 @@ class SubOrchestrationInstanceFailedEvent(_message.Message): def __init__(self, taskScheduledId: _Optional[int] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... class TimerCreatedEvent(_message.Message): - __slots__ = ("fireAt", "name") + __slots__ = ("fireAt", "name", "rerunParentInstanceInfo") FIREAT_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] + RERUNPARENTINSTANCEINFO_FIELD_NUMBER: _ClassVar[int] fireAt: _timestamp_pb2.Timestamp name: str - def __init__(self, fireAt: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., name: _Optional[str] = ...) -> None: ... + rerunParentInstanceInfo: RerunParentInstanceInfo + def __init__(self, fireAt: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., name: _Optional[str] = ..., rerunParentInstanceInfo: _Optional[_Union[RerunParentInstanceInfo, _Mapping]] = ...) -> None: ... class TimerFiredEvent(_message.Message): __slots__ = ("fireAt", "timerId") @@ -262,8 +291,10 @@ class TimerFiredEvent(_message.Message): def __init__(self, fireAt: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., timerId: _Optional[int] = ...) -> None: ... class OrchestratorStartedEvent(_message.Message): - __slots__ = () - def __init__(self) -> None: ... + __slots__ = ("version",) + VERSION_FIELD_NUMBER: _ClassVar[int] + version: OrchestrationVersion + def __init__(self, version: _Optional[_Union[OrchestrationVersion, _Mapping]] = ...) -> None: ... class OrchestratorCompletedEvent(_message.Message): __slots__ = () @@ -317,6 +348,14 @@ class ExecutionResumedEvent(_message.Message): input: _wrappers_pb2.StringValue def __init__(self, input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
+class ExecutionStalledEvent(_message.Message): + __slots__ = ("reason", "description") + REASON_FIELD_NUMBER: _ClassVar[int] + DESCRIPTION_FIELD_NUMBER: _ClassVar[int] + reason: StalledReason + description: str + def __init__(self, reason: _Optional[_Union[StalledReason, str]] = ..., description: _Optional[str] = ...) -> None: ... + class EntityOperationSignaledEvent(_message.Message): __slots__ = ("requestId", "operation", "scheduledTime", "input", "targetInstanceId") REQUESTID_FIELD_NUMBER: _ClassVar[int] @@ -394,7 +433,7 @@ class EntityLockGrantedEvent(_message.Message): def __init__(self, criticalSectionId: _Optional[str] = ...) -> None: ... class HistoryEvent(_message.Message): - __slots__ = ("eventId", "timestamp", "executionStarted", "executionCompleted", "executionTerminated", "taskScheduled", "taskCompleted", "taskFailed", "subOrchestrationInstanceCreated", "subOrchestrationInstanceCompleted", "subOrchestrationInstanceFailed", "timerCreated", "timerFired", "orchestratorStarted", "orchestratorCompleted", "eventSent", "eventRaised", "genericEvent", "historyState", "continueAsNew", "executionSuspended", "executionResumed", "entityOperationSignaled", "entityOperationCalled", "entityOperationCompleted", "entityOperationFailed", "entityLockRequested", "entityLockGranted", "entityUnlockSent", "router") + __slots__ = ("eventId", "timestamp", "executionStarted", "executionCompleted", "executionTerminated", "taskScheduled", "taskCompleted", "taskFailed", "subOrchestrationInstanceCreated", "subOrchestrationInstanceCompleted", "subOrchestrationInstanceFailed", "timerCreated", "timerFired", "orchestratorStarted", "orchestratorCompleted", "eventSent", "eventRaised", "genericEvent", "historyState", "continueAsNew", "executionSuspended", "executionResumed", "entityOperationSignaled", "entityOperationCalled", "entityOperationCompleted", "entityOperationFailed", "entityLockRequested", "entityLockGranted", "entityUnlockSent", "executionStalled", "router") 
EVENTID_FIELD_NUMBER: _ClassVar[int] TIMESTAMP_FIELD_NUMBER: _ClassVar[int] EXECUTIONSTARTED_FIELD_NUMBER: _ClassVar[int] @@ -424,6 +463,7 @@ class HistoryEvent(_message.Message): ENTITYLOCKREQUESTED_FIELD_NUMBER: _ClassVar[int] ENTITYLOCKGRANTED_FIELD_NUMBER: _ClassVar[int] ENTITYUNLOCKSENT_FIELD_NUMBER: _ClassVar[int] + EXECUTIONSTALLED_FIELD_NUMBER: _ClassVar[int] ROUTER_FIELD_NUMBER: _ClassVar[int] eventId: int timestamp: _timestamp_pb2.Timestamp @@ -454,8 +494,9 @@ class HistoryEvent(_message.Message): entityLockRequested: EntityLockRequestedEvent entityLockGranted: EntityLockGrantedEvent entityUnlockSent: EntityUnlockSentEvent + executionStalled: ExecutionStalledEvent router: TaskRouter - def __init__(self, eventId: _Optional[int] = ..., timestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., executionStarted: _Optional[_Union[ExecutionStartedEvent, _Mapping]] = ..., executionCompleted: _Optional[_Union[ExecutionCompletedEvent, _Mapping]] = ..., executionTerminated: _Optional[_Union[ExecutionTerminatedEvent, _Mapping]] = ..., taskScheduled: _Optional[_Union[TaskScheduledEvent, _Mapping]] = ..., taskCompleted: _Optional[_Union[TaskCompletedEvent, _Mapping]] = ..., taskFailed: _Optional[_Union[TaskFailedEvent, _Mapping]] = ..., subOrchestrationInstanceCreated: _Optional[_Union[SubOrchestrationInstanceCreatedEvent, _Mapping]] = ..., subOrchestrationInstanceCompleted: _Optional[_Union[SubOrchestrationInstanceCompletedEvent, _Mapping]] = ..., subOrchestrationInstanceFailed: _Optional[_Union[SubOrchestrationInstanceFailedEvent, _Mapping]] = ..., timerCreated: _Optional[_Union[TimerCreatedEvent, _Mapping]] = ..., timerFired: _Optional[_Union[TimerFiredEvent, _Mapping]] = ..., orchestratorStarted: _Optional[_Union[OrchestratorStartedEvent, _Mapping]] = ..., orchestratorCompleted: _Optional[_Union[OrchestratorCompletedEvent, _Mapping]] = ..., eventSent: _Optional[_Union[EventSentEvent, _Mapping]] = ..., eventRaised: 
_Optional[_Union[EventRaisedEvent, _Mapping]] = ..., genericEvent: _Optional[_Union[GenericEvent, _Mapping]] = ..., historyState: _Optional[_Union[HistoryStateEvent, _Mapping]] = ..., continueAsNew: _Optional[_Union[ContinueAsNewEvent, _Mapping]] = ..., executionSuspended: _Optional[_Union[ExecutionSuspendedEvent, _Mapping]] = ..., executionResumed: _Optional[_Union[ExecutionResumedEvent, _Mapping]] = ..., entityOperationSignaled: _Optional[_Union[EntityOperationSignaledEvent, _Mapping]] = ..., entityOperationCalled: _Optional[_Union[EntityOperationCalledEvent, _Mapping]] = ..., entityOperationCompleted: _Optional[_Union[EntityOperationCompletedEvent, _Mapping]] = ..., entityOperationFailed: _Optional[_Union[EntityOperationFailedEvent, _Mapping]] = ..., entityLockRequested: _Optional[_Union[EntityLockRequestedEvent, _Mapping]] = ..., entityLockGranted: _Optional[_Union[EntityLockGrantedEvent, _Mapping]] = ..., entityUnlockSent: _Optional[_Union[EntityUnlockSentEvent, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... 
+ def __init__(self, eventId: _Optional[int] = ..., timestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., executionStarted: _Optional[_Union[ExecutionStartedEvent, _Mapping]] = ..., executionCompleted: _Optional[_Union[ExecutionCompletedEvent, _Mapping]] = ..., executionTerminated: _Optional[_Union[ExecutionTerminatedEvent, _Mapping]] = ..., taskScheduled: _Optional[_Union[TaskScheduledEvent, _Mapping]] = ..., taskCompleted: _Optional[_Union[TaskCompletedEvent, _Mapping]] = ..., taskFailed: _Optional[_Union[TaskFailedEvent, _Mapping]] = ..., subOrchestrationInstanceCreated: _Optional[_Union[SubOrchestrationInstanceCreatedEvent, _Mapping]] = ..., subOrchestrationInstanceCompleted: _Optional[_Union[SubOrchestrationInstanceCompletedEvent, _Mapping]] = ..., subOrchestrationInstanceFailed: _Optional[_Union[SubOrchestrationInstanceFailedEvent, _Mapping]] = ..., timerCreated: _Optional[_Union[TimerCreatedEvent, _Mapping]] = ..., timerFired: _Optional[_Union[TimerFiredEvent, _Mapping]] = ..., orchestratorStarted: _Optional[_Union[OrchestratorStartedEvent, _Mapping]] = ..., orchestratorCompleted: _Optional[_Union[OrchestratorCompletedEvent, _Mapping]] = ..., eventSent: _Optional[_Union[EventSentEvent, _Mapping]] = ..., eventRaised: _Optional[_Union[EventRaisedEvent, _Mapping]] = ..., genericEvent: _Optional[_Union[GenericEvent, _Mapping]] = ..., historyState: _Optional[_Union[HistoryStateEvent, _Mapping]] = ..., continueAsNew: _Optional[_Union[ContinueAsNewEvent, _Mapping]] = ..., executionSuspended: _Optional[_Union[ExecutionSuspendedEvent, _Mapping]] = ..., executionResumed: _Optional[_Union[ExecutionResumedEvent, _Mapping]] = ..., entityOperationSignaled: _Optional[_Union[EntityOperationSignaledEvent, _Mapping]] = ..., entityOperationCalled: _Optional[_Union[EntityOperationCalledEvent, _Mapping]] = ..., entityOperationCompleted: _Optional[_Union[EntityOperationCompletedEvent, _Mapping]] = ..., entityOperationFailed: 
_Optional[_Union[EntityOperationFailedEvent, _Mapping]] = ..., entityLockRequested: _Optional[_Union[EntityLockRequestedEvent, _Mapping]] = ..., entityLockGranted: _Optional[_Union[EntityLockGrantedEvent, _Mapping]] = ..., entityUnlockSent: _Optional[_Union[EntityUnlockSentEvent, _Mapping]] = ..., executionStalled: _Optional[_Union[ExecutionStalledEvent, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... class ScheduleTaskAction(_message.Message): __slots__ = ("name", "version", "input", "router", "taskExecutionId") @@ -541,8 +582,12 @@ class SendEntityMessageAction(_message.Message): entityUnlockSent: EntityUnlockSentEvent def __init__(self, entityOperationSignaled: _Optional[_Union[EntityOperationSignaledEvent, _Mapping]] = ..., entityOperationCalled: _Optional[_Union[EntityOperationCalledEvent, _Mapping]] = ..., entityLockRequested: _Optional[_Union[EntityLockRequestedEvent, _Mapping]] = ..., entityUnlockSent: _Optional[_Union[EntityUnlockSentEvent, _Mapping]] = ...) -> None: ... +class OrchestratorVersionNotAvailableAction(_message.Message): + __slots__ = () + def __init__(self) -> None: ... 
+ class OrchestratorAction(_message.Message): - __slots__ = ("id", "scheduleTask", "createSubOrchestration", "createTimer", "sendEvent", "completeOrchestration", "terminateOrchestration", "sendEntityMessage", "router") + __slots__ = ("id", "scheduleTask", "createSubOrchestration", "createTimer", "sendEvent", "completeOrchestration", "terminateOrchestration", "sendEntityMessage", "orchestratorVersionNotAvailable", "router") ID_FIELD_NUMBER: _ClassVar[int] SCHEDULETASK_FIELD_NUMBER: _ClassVar[int] CREATESUBORCHESTRATION_FIELD_NUMBER: _ClassVar[int] @@ -551,6 +596,7 @@ class OrchestratorAction(_message.Message): COMPLETEORCHESTRATION_FIELD_NUMBER: _ClassVar[int] TERMINATEORCHESTRATION_FIELD_NUMBER: _ClassVar[int] SENDENTITYMESSAGE_FIELD_NUMBER: _ClassVar[int] + ORCHESTRATORVERSIONNOTAVAILABLE_FIELD_NUMBER: _ClassVar[int] ROUTER_FIELD_NUMBER: _ClassVar[int] id: int scheduleTask: ScheduleTaskAction @@ -560,8 +606,9 @@ class OrchestratorAction(_message.Message): completeOrchestration: CompleteOrchestrationAction terminateOrchestration: TerminateOrchestrationAction sendEntityMessage: SendEntityMessageAction + orchestratorVersionNotAvailable: OrchestratorVersionNotAvailableAction router: TaskRouter - def __init__(self, id: _Optional[int] = ..., scheduleTask: _Optional[_Union[ScheduleTaskAction, _Mapping]] = ..., createSubOrchestration: _Optional[_Union[CreateSubOrchestrationAction, _Mapping]] = ..., createTimer: _Optional[_Union[CreateTimerAction, _Mapping]] = ..., sendEvent: _Optional[_Union[SendEventAction, _Mapping]] = ..., completeOrchestration: _Optional[_Union[CompleteOrchestrationAction, _Mapping]] = ..., terminateOrchestration: _Optional[_Union[TerminateOrchestrationAction, _Mapping]] = ..., sendEntityMessage: _Optional[_Union[SendEntityMessageAction, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... 
+ def __init__(self, id: _Optional[int] = ..., scheduleTask: _Optional[_Union[ScheduleTaskAction, _Mapping]] = ..., createSubOrchestration: _Optional[_Union[CreateSubOrchestrationAction, _Mapping]] = ..., createTimer: _Optional[_Union[CreateTimerAction, _Mapping]] = ..., sendEvent: _Optional[_Union[SendEventAction, _Mapping]] = ..., completeOrchestration: _Optional[_Union[CompleteOrchestrationAction, _Mapping]] = ..., terminateOrchestration: _Optional[_Union[TerminateOrchestrationAction, _Mapping]] = ..., sendEntityMessage: _Optional[_Union[SendEntityMessageAction, _Mapping]] = ..., orchestratorVersionNotAvailable: _Optional[_Union[OrchestratorVersionNotAvailableAction, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... class OrchestratorRequest(_message.Message): __slots__ = ("instanceId", "executionId", "pastEvents", "newEvents", "entityParameters", "requiresHistoryStreaming", "router") @@ -582,18 +629,20 @@ class OrchestratorRequest(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ..., requiresHistoryStreaming: bool = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... 
class OrchestratorResponse(_message.Message): - __slots__ = ("instanceId", "actions", "customStatus", "completionToken", "numEventsProcessed") + __slots__ = ("instanceId", "actions", "customStatus", "completionToken", "numEventsProcessed", "version") INSTANCEID_FIELD_NUMBER: _ClassVar[int] ACTIONS_FIELD_NUMBER: _ClassVar[int] CUSTOMSTATUS_FIELD_NUMBER: _ClassVar[int] COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] NUMEVENTSPROCESSED_FIELD_NUMBER: _ClassVar[int] + VERSION_FIELD_NUMBER: _ClassVar[int] instanceId: str actions: _containers.RepeatedCompositeFieldContainer[OrchestratorAction] customStatus: _wrappers_pb2.StringValue completionToken: str numEventsProcessed: _wrappers_pb2.Int32Value - def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completionToken: _Optional[str] = ..., numEventsProcessed: _Optional[_Union[_wrappers_pb2.Int32Value, _Mapping]] = ...) -> None: ... + version: OrchestrationVersion + def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completionToken: _Optional[str] = ..., numEventsProcessed: _Optional[_Union[_wrappers_pb2.Int32Value, _Mapping]] = ..., version: _Optional[_Union[OrchestrationVersion, _Mapping]] = ...) -> None: ... class CreateInstanceRequest(_message.Message): __slots__ = ("instanceId", "name", "version", "input", "scheduledStartTimestamp", "orchestrationIdReusePolicy", "executionId", "tags", "parentTraceContext") @@ -794,14 +843,16 @@ class QueryInstancesResponse(_message.Message): def __init__(self, orchestrationState: _Optional[_Iterable[_Union[OrchestrationState, _Mapping]]] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
class PurgeInstancesRequest(_message.Message): - __slots__ = ("instanceId", "purgeInstanceFilter", "recursive") + __slots__ = ("instanceId", "purgeInstanceFilter", "recursive", "force") INSTANCEID_FIELD_NUMBER: _ClassVar[int] PURGEINSTANCEFILTER_FIELD_NUMBER: _ClassVar[int] RECURSIVE_FIELD_NUMBER: _ClassVar[int] + FORCE_FIELD_NUMBER: _ClassVar[int] instanceId: str purgeInstanceFilter: PurgeInstanceFilter recursive: bool - def __init__(self, instanceId: _Optional[str] = ..., purgeInstanceFilter: _Optional[_Union[PurgeInstanceFilter, _Mapping]] = ..., recursive: bool = ...) -> None: ... + force: bool + def __init__(self, instanceId: _Optional[str] = ..., purgeInstanceFilter: _Optional[_Union[PurgeInstanceFilter, _Mapping]] = ..., recursive: bool = ..., force: bool = ...) -> None: ... class PurgeInstanceFilter(_message.Message): __slots__ = ("createdTimeFrom", "createdTimeTo", "runtimeStatus") @@ -1140,21 +1191,51 @@ class HistoryChunk(_message.Message): def __init__(self, events: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ...) -> None: ... class RerunWorkflowFromEventRequest(_message.Message): - __slots__ = ("sourceInstanceID", "eventID", "newInstanceID", "input", "overwriteInput") + __slots__ = ("sourceInstanceID", "eventID", "newInstanceID", "input", "overwriteInput", "newChildWorkflowInstanceID") SOURCEINSTANCEID_FIELD_NUMBER: _ClassVar[int] EVENTID_FIELD_NUMBER: _ClassVar[int] NEWINSTANCEID_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] OVERWRITEINPUT_FIELD_NUMBER: _ClassVar[int] + NEWCHILDWORKFLOWINSTANCEID_FIELD_NUMBER: _ClassVar[int] sourceInstanceID: str eventID: int newInstanceID: str input: _wrappers_pb2.StringValue overwriteInput: bool - def __init__(self, sourceInstanceID: _Optional[str] = ..., eventID: _Optional[int] = ..., newInstanceID: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., overwriteInput: bool = ...) -> None: ... 
+ newChildWorkflowInstanceID: str + def __init__(self, sourceInstanceID: _Optional[str] = ..., eventID: _Optional[int] = ..., newInstanceID: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., overwriteInput: bool = ..., newChildWorkflowInstanceID: _Optional[str] = ...) -> None: ... class RerunWorkflowFromEventResponse(_message.Message): __slots__ = ("newInstanceID",) NEWINSTANCEID_FIELD_NUMBER: _ClassVar[int] newInstanceID: str def __init__(self, newInstanceID: _Optional[str] = ...) -> None: ... + +class ListInstanceIDsRequest(_message.Message): + __slots__ = ("continuationToken", "pageSize") + CONTINUATIONTOKEN_FIELD_NUMBER: _ClassVar[int] + PAGESIZE_FIELD_NUMBER: _ClassVar[int] + continuationToken: str + pageSize: int + def __init__(self, continuationToken: _Optional[str] = ..., pageSize: _Optional[int] = ...) -> None: ... + +class ListInstanceIDsResponse(_message.Message): + __slots__ = ("instanceIds", "continuationToken") + INSTANCEIDS_FIELD_NUMBER: _ClassVar[int] + CONTINUATIONTOKEN_FIELD_NUMBER: _ClassVar[int] + instanceIds: _containers.RepeatedScalarFieldContainer[str] + continuationToken: str + def __init__(self, instanceIds: _Optional[_Iterable[str]] = ..., continuationToken: _Optional[str] = ...) -> None: ... + +class GetInstanceHistoryRequest(_message.Message): + __slots__ = ("instanceId",) + INSTANCEID_FIELD_NUMBER: _ClassVar[int] + instanceId: str + def __init__(self, instanceId: _Optional[str] = ...) -> None: ... + +class GetInstanceHistoryResponse(_message.Message): + __slots__ = ("events",) + EVENTS_FIELD_NUMBER: _ClassVar[int] + events: _containers.RepeatedCompositeFieldContainer[HistoryEvent] + def __init__(self, events: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ...) -> None: ... 
diff --git a/durabletask/internal/orchestrator_service_pb2_grpc.py b/durabletask/internal/orchestrator_service_pb2_grpc.py index 3342f3a..eaca21c 100644 --- a/durabletask/internal/orchestrator_service_pb2_grpc.py +++ b/durabletask/internal/orchestrator_service_pb2_grpc.py @@ -170,6 +170,16 @@ def __init__(self, channel): request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventRequest.SerializeToString, response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventResponse.FromString, _registered_method=True) + self.ListInstanceIDs = channel.unary_unary( + '/TaskHubSidecarService/ListInstanceIDs', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsResponse.FromString, + _registered_method=True) + self.GetInstanceHistory = channel.unary_unary( + '/TaskHubSidecarService/GetInstanceHistory', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryResponse.FromString, + _registered_method=True) class TaskHubSidecarServiceServicer(object): @@ -360,6 +370,18 @@ def RerunWorkflowFromEvent(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def ListInstanceIDs(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetInstanceHistory(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not 
implemented!') + raise NotImplementedError('Method not implemented!') + def add_TaskHubSidecarServiceServicer_to_server(servicer, server): rpc_method_handlers = { @@ -498,6 +520,16 @@ def add_TaskHubSidecarServiceServicer_to_server(servicer, server): request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventRequest.FromString, response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventResponse.SerializeToString, ), + 'ListInstanceIDs': grpc.unary_unary_rpc_method_handler( + servicer.ListInstanceIDs, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsResponse.SerializeToString, + ), + 'GetInstanceHistory': grpc.unary_unary_rpc_method_handler( + servicer.GetInstanceHistory, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'TaskHubSidecarService', rpc_method_handlers) @@ -1237,3 +1269,57 @@ def RerunWorkflowFromEvent(request, timeout, metadata, _registered_method=True) + + @staticmethod + def ListInstanceIDs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/ListInstanceIDs', + durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, 
+ metadata, + _registered_method=True) + + @staticmethod + def GetInstanceHistory(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/GetInstanceHistory', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/durabletask/task.py b/durabletask/task.py index 0b27b6f..3eaf9a2 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -189,6 +189,22 @@ def continue_as_new(self, new_input: Any, *, save_events: bool = False) -> None: """ pass + @abstractmethod + def is_patched(self, patch_name: str) -> bool: + """Check if the given patch name can be applied to the orchestration. + + Parameters + ---------- + patch_name : str + The name of the patch to check. + + Returns + ------- + bool + True if the given patch name can be applied to the orchestration, False otherwise. + """ + pass + class FailureDetails: def __init__(self, message: str, error_type: str, stack_trace: Optional[str]): diff --git a/durabletask/worker.py b/durabletask/worker.py index 29d67fc..bbe6c6d 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -25,6 +25,8 @@ TInput = TypeVar("TInput") TOutput = TypeVar("TOutput") +class VersionNotRegisteredException(Exception): + pass class ConcurrencyOptions: """Configuration options for controlling concurrency of different work item types and the thread pool size. 
@@ -74,30 +76,58 @@ def __init__( class _Registry: orchestrators: dict[str, task.Orchestrator] + versioned_orchestrators: dict[str, dict[str, task.Orchestrator]] + latest_versioned_orchestrators_version_name: dict[str, str] activities: dict[str, task.Activity] def __init__(self): self.orchestrators = {} + self.versioned_orchestrators = {} + self.latest_versioned_orchestrators_version_name = {} self.activities = {} - def add_orchestrator(self, fn: task.Orchestrator) -> str: + def add_orchestrator(self, fn: task.Orchestrator, version_name: Optional[str] = None, is_latest: bool = False) -> str: if fn is None: raise ValueError("An orchestrator function argument is required.") name = task.get_name(fn) - self.add_named_orchestrator(name, fn) + self.add_named_orchestrator(name, fn, version_name, is_latest) return name - def add_named_orchestrator(self, name: str, fn: task.Orchestrator) -> None: + def add_named_orchestrator(self, name: str, fn: task.Orchestrator, version_name: Optional[str] = None, is_latest: bool = False) -> None: if not name: raise ValueError("A non-empty orchestrator name is required.") + + if version_name is None: + if name in self.orchestrators: + raise ValueError(f"A '{name}' orchestrator already exists.") + self.orchestrators[name] = fn + else: + if name not in self.versioned_orchestrators: + self.versioned_orchestrators[name] = {} + if version_name in self.versioned_orchestrators[name]: + raise ValueError(f"The version '{version_name}' of '{name}' orchestrator already exists.") + self.versioned_orchestrators[name][version_name] = fn + if is_latest: + self.latest_versioned_orchestrators_version_name[name] = version_name + + def get_orchestrator(self, name: str, version_name: Optional[str] = None) -> Optional[tuple[task.Orchestrator, str]]: if name in self.orchestrators: - raise ValueError(f"A '{name}' orchestrator already exists.") + return self.orchestrators.get(name), None - self.orchestrators[name] = fn + if name in self.versioned_orchestrators: 
+ if version_name: + version_to_use = version_name + elif name in self.latest_versioned_orchestrators_version_name: + version_to_use = self.latest_versioned_orchestrators_version_name[name] + else: + return None, None + + if version_to_use not in self.versioned_orchestrators[name]: + raise VersionNotRegisteredException + return self.versioned_orchestrators[name].get(version_to_use), version_to_use - def get_orchestrator(self, name: str) -> Optional[task.Orchestrator]: - return self.orchestrators.get(name) + return None, None def add_activity(self, fn: task.Activity) -> str: if fn is None: @@ -540,11 +570,22 @@ def _execute_orchestrator( try: executor = _OrchestrationExecutor(self._registry, self._logger) result = executor.execute(req.instanceId, req.pastEvents, req.newEvents) + + version = None + if result.version_name: + version = version or pb.OrchestrationVersion() + version.name = result.version_name + if result.patches: + version = version or pb.OrchestrationVersion() + version.patches.extend(result.patches) + + res = pb.OrchestratorResponse( instanceId=req.instanceId, actions=result.actions, customStatus=ph.get_string_value(result.encoded_custom_status), completionToken=completionToken, + version=version, ) except Exception as ex: self._logger.exception( @@ -629,6 +670,11 @@ def __init__(self, instance_id: str): self._new_input: Optional[Any] = None self._save_events = False self._encoded_custom_status: Optional[str] = None + self._orchestrator_started_version: Optional[pb.OrchestrationVersion] = None + self._version_name: Optional[str] = None + self._history_patches: dict[str, bool] = {} + self._applied_patches: dict[str, bool] = {} + self._encountered_patches: list[str] = [] def run(self, generator: Generator[task.Task, Any, Any]): self._generator = generator @@ -705,6 +751,14 @@ def set_failed(self, ex: Exception): ) self._pending_actions[action.id] = action + + def set_version_not_registered(self): + self._pending_actions.clear() + self._completion_status 
= pb.ORCHESTRATION_STATUS_STALLED + action = ph.new_orchestrator_version_not_available_action(self.next_sequence_number()) + self._pending_actions[action.id] = action + + def set_continued_as_new(self, new_input: Any, save_events: bool): if self._is_complete: return @@ -916,13 +970,38 @@ def continue_as_new(self, new_input, *, save_events: bool = False) -> None: self.set_continued_as_new(new_input, save_events) + def is_patched(self, patch_name: str) -> bool: + is_patched = self._is_patched(patch_name) + if is_patched: + self._encountered_patches.append(patch_name) + return is_patched + + def _is_patched(self, patch_name: str) -> bool: + if patch_name in self._applied_patches: + return self._applied_patches[patch_name] + if patch_name in self._history_patches: + self._applied_patches[patch_name] = True + return True + + if self._is_replaying: + self._applied_patches[patch_name] = False + return False + + self._applied_patches[patch_name] = True + return True + + class ExecutionResults: actions: list[pb.OrchestratorAction] encoded_custom_status: Optional[str] + version_name: Optional[str] + patches: Optional[list[str]] - def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str]): + def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str], version_name: Optional[str] = None, patches: Optional[list[str]] = None): self.actions = actions self.encoded_custom_status = encoded_custom_status + self.version_name = version_name + self.patches = patches class _OrchestrationExecutor: @@ -965,6 +1044,8 @@ def execute( for new_event in new_events: self.process_event(ctx, new_event) + except VersionNotRegisteredException: + ctx.set_version_not_registered() except Exception as ex: # Unhandled exceptions fail the orchestration ctx.set_failed(ex) @@ -989,7 +1070,12 @@ def execute( self._logger.debug( f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}" ) - return 
ExecutionResults(actions=actions, encoded_custom_status=ctx._encoded_custom_status) + return ExecutionResults( + actions=actions, + encoded_custom_status=ctx._encoded_custom_status, + version_name=getattr(ctx, '_version_name', None), + patches=ctx._encountered_patches + ) def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent) -> None: if self._is_suspended and _is_suspendable(event): @@ -1001,19 +1087,32 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven try: if event.HasField("orchestratorStarted"): ctx.current_utc_datetime = event.timestamp.ToDatetime() + ctx._orchestrator_started_version = event.orchestratorStarted.version elif event.HasField("executionStarted"): if event.router.targetAppID: ctx._app_id = event.router.targetAppID else: ctx._app_id = event.router.sourceAppID + if ctx._orchestrator_started_version and ctx._orchestrator_started_version.patches: + ctx._history_patches = {patch: True for patch in ctx._orchestrator_started_version.patches} + + version_name = None + if ctx._orchestrator_started_version and ctx._orchestrator_started_version.name: + version_name = ctx._orchestrator_started_version.name + + # TODO: Check if we already started the orchestration - fn = self._registry.get_orchestrator(event.executionStarted.name) + fn, version_used = self._registry.get_orchestrator(event.executionStarted.name, version_name=version_name) + if fn is None: raise OrchestratorNotRegisteredError( f"A '{event.executionStarted.name}' orchestrator was not registered." 
) + if version_used is not None: + ctx._version_name = version_used + # deserialize the input, if any input = None if ( @@ -1280,6 +1379,9 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven pb.ORCHESTRATION_STATUS_TERMINATED, is_result_encoded=True, ) + elif event.HasField("executionStalled"): + # Nothing to do + pass else: eventType = event.WhichOneof("eventType") raise task.OrchestrationStateError( diff --git a/tests/durabletask/test_registry.py b/tests/durabletask/test_registry.py index 743330c..b5fcfa9 100644 --- a/tests/durabletask/test_registry.py +++ b/tests/durabletask/test_registry.py @@ -92,7 +92,7 @@ def dummy_orchestrator(ctx, input): return "done" name = registry.add_orchestrator(dummy_orchestrator) - retrieved = registry.get_orchestrator(name) + retrieved, _ = registry.get_orchestrator(name) assert retrieved is dummy_orchestrator @@ -101,7 +101,7 @@ def test_registry_get_orchestrator_not_exists(): """Test retrieving a non-existent orchestrator returns None.""" registry = worker._Registry() - retrieved = registry.get_orchestrator("non_existent") + retrieved, _ = registry.get_orchestrator("non_existent") assert retrieved is None @@ -142,8 +142,10 @@ def orchestrator2(ctx, input): name2 = registry.add_orchestrator(orchestrator2) assert name1 != name2 - assert registry.get_orchestrator(name1) is orchestrator1 - assert registry.get_orchestrator(name2) is orchestrator2 + orchestrator1, _ = registry.get_orchestrator(name1) + orchestrator2, _ = registry.get_orchestrator(name2) + assert orchestrator1 is not None + assert orchestrator2 is not None def test_registry_add_multiple_activities(): @@ -162,3 +164,42 @@ def activity2(ctx, input): assert name1 != name2 assert registry.get_activity(name1) is activity1 assert registry.get_activity(name2) is activity2 + +def test_registry_add_named_versioned_orchestrators(): + """Test adding versioned orchestrators.""" + registry = worker._Registry() + + def orchestrator1(ctx, input): + 
return "one" + + def orchestrator2(ctx, input): + return "two" + + def orchestrator3(ctx, input): + return "two" + + registry.add_named_orchestrator(name="orchestrator", fn=orchestrator1, version_name="v1") + registry.add_named_orchestrator(name="orchestrator", fn=orchestrator2, version_name="v2", is_latest=True) + registry.add_named_orchestrator(name="orchestrator", fn=orchestrator3, version_name="v3") + + orquestrator, version = registry.get_orchestrator(name="orchestrator") + assert orquestrator is orchestrator2 + assert version == "v2" + + orquestrator, version = registry.get_orchestrator(name="orchestrator", version_name="v1") + assert orquestrator is orchestrator1 + assert version == "v1" + + orquestrator, version = registry.get_orchestrator(name="orchestrator", version_name="v2") + assert orquestrator is orchestrator2 + assert version == "v2" + + orquestrator, version = registry.get_orchestrator(name="orchestrator", version_name="v3") + assert orquestrator is orchestrator3 + assert version == "v3" + + with pytest.raises(worker.VersionNotRegisteredException): + registry.get_orchestrator(name="orchestrator", version_name="v4") + + orquestrator, _ = registry.get_orchestrator(name="non-existent") + assert orquestrator is None From c53b244742dd5a9617cc9965363fb4ebd2186f65 Mon Sep 17 00:00:00 2001 From: Albert Callarisa Date: Fri, 23 Jan 2026 14:19:49 +0100 Subject: [PATCH 81/81] handle orchestrator version properly Signed-off-by: Albert Callarisa --- durabletask/worker.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/durabletask/worker.py b/durabletask/worker.py index bbe6c6d..1234387 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -670,7 +670,7 @@ def __init__(self, instance_id: str): self._new_input: Optional[Any] = None self._save_events = False self._encoded_custom_status: Optional[str] = None - self._orchestrator_started_version: Optional[pb.OrchestrationVersion] = None + self._orchestrator_version_name: 
Optional[str] = None self._version_name: Optional[str] = None self._history_patches: dict[str, bool] = {} self._applied_patches: dict[str, bool] = {} @@ -1087,19 +1087,20 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven try: if event.HasField("orchestratorStarted"): ctx.current_utc_datetime = event.timestamp.ToDatetime() - ctx._orchestrator_started_version = event.orchestratorStarted.version + if event.orchestratorStarted.version: + if event.orchestratorStarted.version.name: + ctx._orchestrator_version_name = event.orchestratorStarted.version.name + for patch in event.orchestratorStarted.version.patches: + ctx._history_patches[patch] = True elif event.HasField("executionStarted"): if event.router.targetAppID: ctx._app_id = event.router.targetAppID else: ctx._app_id = event.router.sourceAppID - if ctx._orchestrator_started_version and ctx._orchestrator_started_version.patches: - ctx._history_patches = {patch: True for patch in ctx._orchestrator_started_version.patches} - version_name = None - if ctx._orchestrator_started_version and ctx._orchestrator_started_version.name: - version_name = ctx._orchestrator_started_version.name + if ctx._orchestrator_version_name: + version_name = ctx._orchestrator_version_name # TODO: Check if we already started the orchestration