diff --git a/.flake8 b/.flake8 deleted file mode 100644 index ecc399c3..00000000 --- a/.flake8 +++ /dev/null @@ -1,6 +0,0 @@ -[flake8] -ignore = E501,C901 -exclude = - .git - *_pb2* - __pycache__ \ No newline at end of file diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 4c09e6b4..3ed790b0 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -6,6 +6,7 @@ name: Build Validation on: push: branches: [ "main" ] + tags: ["v*"] pull_request: branches: [ "main" ] @@ -16,22 +17,57 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] + python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip - pip install flake8 pytest - pip install -r requirements.txt - - name: Lint with flake8 + pip install .[dev] + - name: Lint with ruff run: | - flake8 . --count --show-source --statistics --exit-zero + ruff check - name: Pytest unit tests run: | - pytest -m "not e2e" --verbose + tox -e py${{ matrix.python-version }} + # Sidecar for running e2e tests requires Go SDK + - name: Install Go SDK + uses: actions/setup-go@v5 + with: + go-version: 'stable' + # Install and run the durabletask-go sidecar for running e2e tests + - name: Pytest e2e tests + run: | + # TODO: use dapr run instead of durabletask-go as it provides a more reliable sidecar behavior for e2e tests + go install github.com/dapr/durabletask-go@main + durabletask-go --port 4001 & + tox -e py${{ matrix.python-version }}-e2e + publish: + needs: build + if: startsWith(github.ref, 'refs/tags/v') + runs-on: ubuntu-latest + env: + TWINE_USERNAME: "__token__" + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: 3.11 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine build + - name: Build and publish Dapr Python SDK + env: + TWINE_PASSWORD: ${{ secrets.PYPI_UPLOAD_PASS }} + run: | + python -m build + twine upload dist/* diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index b3715165..00000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "submodules/durabletask-protobuf"] - path = submodules/durabletask-protobuf - url = https://github.com/microsoft/durabletask-protobuf diff --git a/.vscode/settings.json b/.vscode/settings.json index d737b0b1..1c929acb 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,7 +3,7 @@ "editor.defaultFormatter": "ms-python.autopep8", "editor.formatOnSave": true, "editor.codeActionsOnSave": { - "source.organizeImports": true, + "source.organizeImports": "explicit" }, "editor.rulers": [ 119 @@ -29,5 +29,6 @@ "coverage.xml", "jacoco.xml", "coverage.cobertura.xml" - ] + ], + "makefile.configureOnOpen": false } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index fc4b3d20..376221ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,13 +5,29 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## v0.2.0 (Unreleased) +## v0.3.0 + +### New + +- Added `ConcurrencyOptions` class for fine-grained concurrency control with separate limits for activities and orchestrations. The thread pool worker count can also be configured. + +### Fixed + +- Fixed an issue where a worker could not recover after its connection was interrupted or severed + +## v0.2.1 ### New - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) +### Changes + +- Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions and improves interoperability with other packages and libraries ([#36](https://github.com/microsoft/durabletask-python/pull/36)) - by [@berndverst](https://github.com/berndverst) +- HTTP and gRPC protocols and their secure variants are stripped from the host name parameter if provided. Secure mode is enabled if the protocol provided is https or grpcs ([#38](https://github.com/microsoft/durabletask-python/pull/38)) - by [@berndverst](https://github.com/berndverst) +- Improve ProtoGen by downloading the proto file directly instead of using a submodule ([#39](https://github.com/microsoft/durabletask-python/pull/39)) - by [@berndverst](https://github.com/berndverst) + ### Updates - Updated `durabletask-protobuf` submodule reference to latest diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..0ca6992b --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,2 @@ +# These owners are the maintainers and approvers of this repo +* @dapr/maintainers-python-sdk @dapr/approvers-python-sdk \ No newline at end of file diff --git a/Makefile b/Makefile index 16b883e0..be0a3174 100644 --- a/Makefile +++ b/Makefile @@ -7,11 +7,22 @@ test-unit: test-e2e: pytest -m e2e --verbose +coverage-clean: + rm -f .coverage .coverage.* coverage.xml + +coverage-all: coverage-clean + pytest -m "not e2e" --durations=0 --cov=durabletask --cov-branch --cov-report=term-missing --cov-report=xml + pytest -m e2e --durations=0 --cov=durabletask --cov-branch --cov-report=term-missing --cov-report=xml --cov-append + install: python3 -m pip install . gen-proto: -# NOTE: There is currently a hand-edit that we make to the generated orchestrator_service_pb2.py file after it's generated to help resolve import problems. - python3 -m grpc_tools.protoc --proto_path=./submodules/durabletask-protobuf/protos --python_out=./durabletask/internal --pyi_out=./durabletask/internal --grpc_python_out=./durabletask/internal orchestrator_service.proto + curl -o durabletask/internal/orchestrator_service.proto https://raw.githubusercontent.com/dapr/durabletask-protobuf/refs/heads/main/protos/orchestrator_service.proto + curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/dapr/durabletask-protobuf/commits?path=protos/orchestrator_service.proto&sha=main&per_page=1" | jq -r '.[0].sha' > durabletask/internal/PROTO_SOURCE_COMMIT_HASH + # NOTE: remember to check/update the pyproject.toml protobuf version to follow https://github.com/grpc/grpc/blob/v{{VERSION GRPC IO TOOL BELOW}}/tools/distrib/python/grpcio_tools/setup.py + pip install .[dev] + python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=. 
./durabletask/internal/orchestrator_service.proto + rm durabletask/internal/*.proto -.PHONY: init test-unit test-e2e gen-proto install +.PHONY: init test-unit test-e2e coverage-clean coverage-all gen-proto install diff --git a/README.md b/README.md index 22b3c44d..d4604e0f 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Durable Task Client SDK for Python +# Durable Task Client SDK for Python (Dapr fork) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Build Validation](https://github.com/microsoft/durabletask-python/actions/workflows/pr-validation.yml/badge.svg)](https://github.com/microsoft/durabletask-python/actions/workflows/pr-validation.yml) @@ -11,6 +11,38 @@ This repo contains a Python client SDK for use with the [Durable Task Framework > Note that this project is **not** currently affiliated with the [Durable Functions](https://docs.microsoft.com/azure/azure-functions/durable/durable-functions-overview) project for Azure Functions. If you are looking for a Python SDK for Durable Functions, please see [this repo](https://github.com/Azure/azure-functions-durable-python). +## Minimal worker setup + +To execute orchestrations and activities you must run a worker that connects to the Dapr Workflow sidecar and dispatches work on background threads: + +```python +from durabletask.worker import TaskHubGrpcWorker + +worker = TaskHubGrpcWorker(host_address="localhost:4001") + +worker.add_orchestrator(say_hello) +worker.add_activity(hello_activity) + +try: + worker.start() + # Worker runs in the background and processes work until stopped +finally: + worker.stop() +``` + +Always stop the worker when you're finished. The worker keeps polling threads alive; if you skip `stop()` they continue running and can prevent your process from shutting down cleanly after failures. You can rely on the context manager form to guarantee cleanup: + +```python +from durabletask.worker import TaskHubGrpcWorker + +with TaskHubGrpcWorker(host_address="localhost:4001") as worker: + worker.add_orchestrator(say_hello) + worker.add_activity(hello_activity) + worker.start() + # worker.stop() is called automatically on exit +``` + + ## Supported patterns The following orchestration patterns are currently supported. @@ -126,15 +158,102 @@ Orchestrations can be continued as new using the `continue_as_new` API. This API Orchestrations can be suspended using the `suspend_orchestration` client API and will remain suspended until resumed using the `resume_orchestration` client API. A suspended orchestration will stop processing new events, but will continue to buffer any that happen to arrive until resumed, ensuring that no data is lost. An orchestration can also be terminated using the `terminate_orchestration` client API. Terminated orchestrations will stop processing new events and will discard any buffered events. -### Retry policies (TODO) +### Retry policies Orchestrations can specify retry policies for activities and sub-orchestrations. These policies control how many times and how frequently an activity or sub-orchestration will be retried in the event of a transient error. 
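+
+As a quick reference, the delay before each retry follows an exponential-backoff schedule (the same formula described in the notes below). Here is a minimal sketch of that schedule in plain Python; it is illustrative only and makes no SDK calls:
+
+```python
+from datetime import timedelta
+from typing import Iterator, Optional
+
+
+def retry_delays(
+    first_retry_interval: timedelta,
+    max_number_of_attempts: int,
+    backoff_coefficient: float = 1.0,
+    max_retry_interval: Optional[timedelta] = None,
+) -> Iterator[timedelta]:
+    """Yield the delay before each retry; attempt 1 is the initial call."""
+    for retry_number in range(max_number_of_attempts - 1):
+        delay = first_retry_interval * (backoff_coefficient**retry_number)
+        if max_retry_interval is not None:
+            delay = min(delay, max_retry_interval)
+        yield delay
+
+
+# With the example policy below: 1s, 2s, 4s, 8s (the 30s cap is never reached)
+print(list(retry_delays(timedelta(seconds=1), 5, 2.0, timedelta(seconds=30))))
+```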
+#### Creating a retry policy + +```python +from datetime import timedelta +from durabletask import task + +retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), # Initial delay before first retry + max_number_of_attempts=5, # Maximum total attempts (includes first attempt) + backoff_coefficient=2.0, # Exponential backoff multiplier (must be >= 1) + max_retry_interval=timedelta(seconds=30), # Cap on retry delay + retry_timeout=timedelta(minutes=5), # Total time limit for all retries (optional) +) +``` + +**Notes:** +- `max_number_of_attempts` **includes the initial attempt**. For example, `max_number_of_attempts=5` means 1 initial attempt + up to 4 retries. +- `retry_timeout` is optional. If omitted or set to `None`, retries continue until `max_number_of_attempts` is reached. +- `backoff_coefficient` controls exponential backoff: delay = `first_retry_interval * (backoff_coefficient ^ retry_number)`, capped by `max_retry_interval`. +- `non_retryable_error_types` (optional) can specify additional exception types to treat as non-retryable (e.g., `[ValueError, TypeError]`). `NonRetryableError` is always non-retryable regardless of this setting. + +#### Using retry policies + +Apply retry policies to activities or sub-orchestrations: + +```python +def my_orchestrator(ctx: task.OrchestrationContext, input): + # Retry an activity + result = yield ctx.call_activity(my_activity, input=data, retry_policy=retry_policy) + + # Retry a sub-orchestration + result = yield ctx.call_sub_orchestrator(child_orchestrator, input=data, retry_policy=retry_policy) +``` + +#### Non-retryable errors + +For errors that should not be retried (e.g., validation failures, permanent errors), raise a `NonRetryableError`: + +```python +from durabletask.task import NonRetryableError + +def my_activity(ctx: task.ActivityContext, input): + if input is None: + # This error will bypass retry logic and fail immediately + raise NonRetryableError("Input cannot be None") + + # Transient errors (network, timeouts, etc.) will be retried + return call_external_service(input) +``` + +Even with a retry policy configured, `NonRetryableError` will fail immediately without retrying. + +#### Error type matching behavior + +**Important:** Error type matching uses **exact class name comparison**, not `isinstance()` checks. This is because exception objects are serialized to gRPC protobuf messages, where only the class name (as a string) survives serialization. + +**Key implications:** + +- **Not inheritance-aware**: If you specify `ValueError` in `non_retryable_error_types`, it will only match exceptions with the exact class name `"ValueError"`. A custom subclass like `CustomValueError(ValueError)` will NOT match. +- **Workaround**: List all exception types explicitly, including subclasses you want to handle. +- **Built-in exception**: `NonRetryableError` is always treated as non-retryable, matched by the name `"NonRetryableError"`. + +**Example:** + +```python +from datetime import timedelta +from durabletask import task + +# Custom exception hierarchy +class ValidationError(ValueError): + pass + +# This policy ONLY matches exact "ValueError" by name +retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + non_retryable_error_types=[ValueError] # Won't match ValidationError subclass! 
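+    # NOTE: the entry above is serialized to the class-name string "ValueError"
+    # before it crosses the gRPC boundary, so the subclass relationship is lost.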
+) + +# To handle both, list them explicitly: +retry_policy = task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=3, + non_retryable_error_types=[ValueError, ValidationError] # Both converted to name strings +) +``` + ## Getting Started ### Prerequisites -- Python 3.8 +- Python 3.9 - A Durable Task-compatible sidecar, like [Dapr Workflow](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/) ### Installing the Durable Task Python client SDK @@ -161,38 +280,46 @@ The following is more information about how to develop this project. Note that d ### Generating protobufs -Protobuf definitions are stored in the [./submodules/durabletask-proto](./submodules/durabletask-proto) directory, which is a submodule. To update the submodule, run the following command from the project root: - -```sh -git submodule update --init -``` - -Once the submodule is available, the corresponding source code can be regenerated using the following command from the project root: - ```sh make gen-proto ``` +This will download the `orchestrator_service.proto` file from the `dapr/durabletask-protobuf` repo and compile it using `grpcio-tools`. The commit hash of the downloaded proto file is recorded in `durabletask/internal/PROTO_SOURCE_COMMIT_HASH`. + ### Running unit tests -Unit tests can be run using the following command from the project root. Unit tests _don't_ require a sidecar process to be running. +Unit tests can be run from the project root and _don't_ require a sidecar process to be running. + +To run them on a specific Python version (e.g. 3.11), run the following command from the project root: ```sh -make test-unit +tox -e py311 ``` ### Running E2E tests -The E2E (end-to-end) tests require a sidecar process to be running. You can use the Dapr sidecar for this or run a Durable Task test sidecar using the following `docker` command: +The E2E (end-to-end) tests require a sidecar process to be running. + +For tests that don't involve multi-app activities, you can use the Durable Task test sidecar by running the following commands: ```sh -docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator +go install github.com/dapr/durabletask-go@main +durabletask-go --port 4001 +``` + +Certain features, such as multi-app activities, require the full Dapr runtime to be running: + +```shell +dapr init || true + +dapr run --app-id test-app --dapr-grpc-port 4001 --resources-path ./examples/components/ ``` -To run the E2E tests, run the following command from the project root: +To run the E2E tests on a specific Python version (e.g. 3.11), run the following command from the project root: ```sh -make test-e2e +tox -e py311 -- e2e ``` ## Contributing @@ -211,8 +338,8 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio ## Trademarks -This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft -trademarks or logos is subject to and must follow +This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft +trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. 
Any use of third-party trademarks or logos are subject to those third-party's policies. diff --git a/dev-requirements.txt b/dev-requirements.txt new file mode 100644 index 00000000..ba589ab7 --- /dev/null +++ b/dev-requirements.txt @@ -0,0 +1 @@ +grpcio-tools==1.62.3 # 1.62.X is the latest version before protobuf 5.26.X is used, which has breaking changes for Python \ No newline at end of file diff --git a/durabletask/__init__.py b/durabletask/__init__.py index a37823c7..78ea7ca2 100644 --- a/durabletask/__init__.py +++ b/durabletask/__init__.py @@ -3,5 +3,4 @@ """Durable Task SDK for Python""" - PACKAGE_NAME = "durabletask" diff --git a/durabletask/aio/__init__.py b/durabletask/aio/__init__.py new file mode 100644 index 00000000..d4462280 --- /dev/null +++ b/durabletask/aio/__init__.py @@ -0,0 +1,5 @@ +from .client import AsyncTaskHubGrpcClient + +__all__ = [ + "AsyncTaskHubGrpcClient", +] diff --git a/durabletask/aio/client.py b/durabletask/aio/client.py new file mode 100644 index 00000000..9b93b96d --- /dev/null +++ b/durabletask/aio/client.py @@ -0,0 +1,196 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. + +import logging +import uuid +from datetime import datetime +from typing import Any, Optional, Sequence, Union + +import grpc +from google.protobuf import wrappers_pb2 + +import durabletask.internal.helpers as helpers +import durabletask.internal.orchestrator_service_pb2 as pb +import durabletask.internal.orchestrator_service_pb2_grpc as stubs +import durabletask.internal.shared as shared +from durabletask import task +from durabletask.aio.internal.grpc_interceptor import DefaultClientInterceptorImpl +from durabletask.aio.internal.shared import ClientInterceptor, get_grpc_aio_channel +from durabletask.client import ( + OrchestrationState, + OrchestrationStatus, + TInput, + TOutput, + new_orchestration_state, +) + + +class AsyncTaskHubGrpcClient: + def __init__( + self, + *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None, + channel_options: Optional[Sequence[tuple[str, Any]]] = None, + ): + if interceptors is not None: + interceptors = list(interceptors) + if metadata is not None: + interceptors.append(DefaultClientInterceptorImpl(metadata)) + elif metadata is not None: + interceptors = [DefaultClientInterceptorImpl(metadata)] + else: + interceptors = None + + channel = get_grpc_aio_channel( + host_address=host_address, + secure_channel=secure_channel, + interceptors=interceptors, + options=channel_options, + ) + self._channel = channel + self._stub = stubs.TaskHubSidecarServiceStub(channel) + self._logger = shared.get_logger("client", log_handler, log_formatter) + + async def aclose(self): + await self._channel.close() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.aclose() + return False + + async def schedule_new_orchestration( + self, + orchestrator: Union[task.Orchestrator[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + start_at: Optional[datetime] = None, + reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None, + ) -> str: + name = orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) + + req = 
pb.CreateInstanceRequest( + name=name, + instanceId=instance_id if instance_id else uuid.uuid4().hex, + input=wrappers_pb2.StringValue(value=shared.to_json(input)) + if input is not None + else None, + scheduledStartTimestamp=helpers.new_timestamp(start_at) if start_at else None, + version=helpers.get_string_value(None), + orchestrationIdReusePolicy=reuse_id_policy, + ) + + self._logger.info(f"Starting new '{name}' instance with ID = '{req.instanceId}'.") + res: pb.CreateInstanceResponse = await self._stub.StartInstance(req) + return res.instanceId + + async def get_orchestration_state( + self, instance_id: str, *, fetch_payloads: bool = True + ) -> Optional[OrchestrationState]: + req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) + res: pb.GetInstanceResponse = await self._stub.GetInstance(req) + return new_orchestration_state(req.instanceId, res) + + async def wait_for_orchestration_start( + self, instance_id: str, *, fetch_payloads: bool = False, timeout: int = 0 + ) -> Optional[OrchestrationState]: + req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) + try: + grpc_timeout = None if timeout == 0 else timeout + self._logger.info( + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to start." + ) + res: pb.GetInstanceResponse = await self._stub.WaitForInstanceStart( + req, timeout=grpc_timeout + ) + return new_orchestration_state(req.instanceId, res) + except grpc.RpcError as rpc_error: + if rpc_error.code() == grpc.StatusCode.DEADLINE_EXCEEDED: # type: ignore + # Replace gRPC error with the built-in TimeoutError + raise TimeoutError("Timed-out waiting for the orchestration to start") + else: + raise + + async def wait_for_orchestration_completion( + self, instance_id: str, *, fetch_payloads: bool = True, timeout: int = 0 + ) -> Optional[OrchestrationState]: + req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) + try: + grpc_timeout = None if timeout == 0 else timeout + self._logger.info( + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to complete." 
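+                # timeout == 0 maps to grpc_timeout=None, i.e. no gRPC deadline: the call blocks until the orchestration completes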
+ ) + res: pb.GetInstanceResponse = await self._stub.WaitForInstanceCompletion( + req, timeout=grpc_timeout + ) + state = new_orchestration_state(req.instanceId, res) + if not state: + return None + + if ( + state.runtime_status == OrchestrationStatus.FAILED + and state.failure_details is not None + ): + details = state.failure_details + self._logger.info( + f"Instance '{instance_id}' failed: [{details.error_type}] {details.message}" + ) + elif state.runtime_status == OrchestrationStatus.TERMINATED: + self._logger.info(f"Instance '{instance_id}' was terminated.") + elif state.runtime_status == OrchestrationStatus.COMPLETED: + self._logger.info(f"Instance '{instance_id}' completed.") + + return state + except grpc.RpcError as rpc_error: + if rpc_error.code() == grpc.StatusCode.DEADLINE_EXCEEDED: # type: ignore + # Replace gRPC error with the built-in TimeoutError + raise TimeoutError("Timed-out waiting for the orchestration to complete") + else: + raise + + async def raise_orchestration_event( + self, instance_id: str, event_name: str, *, data: Optional[Any] = None + ): + req = pb.RaiseEventRequest( + instanceId=instance_id, + name=event_name, + input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data else None, + ) + + self._logger.info(f"Raising event '{event_name}' for instance '{instance_id}'.") + await self._stub.RaiseEvent(req) + + async def terminate_orchestration( + self, instance_id: str, *, output: Optional[Any] = None, recursive: bool = True + ): + req = pb.TerminateRequest( + instanceId=instance_id, + output=wrappers_pb2.StringValue(value=shared.to_json(output)) if output else None, + recursive=recursive, + ) + + self._logger.info(f"Terminating instance '{instance_id}'.") + await self._stub.TerminateInstance(req) + + async def suspend_orchestration(self, instance_id: str): + req = pb.SuspendRequest(instanceId=instance_id) + self._logger.info(f"Suspending instance '{instance_id}'.") + await self._stub.SuspendInstance(req) + + async def resume_orchestration(self, instance_id: str): + req = pb.ResumeRequest(instanceId=instance_id) + self._logger.info(f"Resuming instance '{instance_id}'.") + await self._stub.ResumeInstance(req) + + async def purge_orchestration(self, instance_id: str, recursive: bool = True): + req = pb.PurgeInstancesRequest(instanceId=instance_id, recursive=recursive) + self._logger.info(f"Purging instance '{instance_id}'.") + await self._stub.PurgeInstances(req) diff --git a/durabletask/internal/__init__.py b/durabletask/aio/internal/__init__.py similarity index 100% rename from durabletask/internal/__init__.py rename to durabletask/aio/internal/__init__.py diff --git a/durabletask/aio/internal/grpc_interceptor.py b/durabletask/aio/internal/grpc_interceptor.py new file mode 100644 index 00000000..843a95ba --- /dev/null +++ b/durabletask/aio/internal/grpc_interceptor.py @@ -0,0 +1,67 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. 
+ +from collections import namedtuple + +from grpc import aio as grpc_aio + + +class _ClientCallDetails( + namedtuple( + "_ClientCallDetails", + ["method", "timeout", "metadata", "credentials", "wait_for_ready", "compression"], + ), + grpc_aio.ClientCallDetails, +): + pass + + +class DefaultClientInterceptorImpl( + grpc_aio.UnaryUnaryClientInterceptor, + grpc_aio.UnaryStreamClientInterceptor, + grpc_aio.StreamUnaryClientInterceptor, + grpc_aio.StreamStreamClientInterceptor, +): + """Async gRPC client interceptor to add metadata to all calls.""" + + def __init__(self, metadata: list[tuple[str, str]]): + super().__init__() + self._metadata = metadata + + def _intercept_call( + self, client_call_details: _ClientCallDetails + ) -> grpc_aio.ClientCallDetails: + if self._metadata is None: + return client_call_details + + if client_call_details.metadata is not None: + metadata = list(client_call_details.metadata) + else: + metadata = [] + + metadata.extend(self._metadata) + compression = getattr(client_call_details, "compression", None) + return _ClientCallDetails( + client_call_details.method, + client_call_details.timeout, + metadata, + client_call_details.credentials, + client_call_details.wait_for_ready, + compression, + ) + + async def intercept_unary_unary(self, continuation, client_call_details, request): + new_client_call_details = self._intercept_call(client_call_details) + return await continuation(new_client_call_details, request) + + async def intercept_unary_stream(self, continuation, client_call_details, request): + new_client_call_details = self._intercept_call(client_call_details) + return await continuation(new_client_call_details, request) + + async def intercept_stream_unary(self, continuation, client_call_details, request_iterator): + new_client_call_details = self._intercept_call(client_call_details) + return await continuation(new_client_call_details, request_iterator) + + async def intercept_stream_stream(self, continuation, client_call_details, request_iterator): + new_client_call_details = self._intercept_call(client_call_details) + return await continuation(new_client_call_details, request_iterator) diff --git a/durabletask/aio/internal/shared.py b/durabletask/aio/internal/shared.py new file mode 100644 index 00000000..cb4ffc0e --- /dev/null +++ b/durabletask/aio/internal/shared.py @@ -0,0 +1,62 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. + +from typing import Optional, Sequence, Union + +import grpc +from grpc import aio as grpc_aio +from grpc.aio import ChannelArgumentType + +from durabletask.internal.shared import ( + INSECURE_PROTOCOLS, + SECURE_PROTOCOLS, + get_default_host_address, +) + +ClientInterceptor = Union[ + grpc_aio.UnaryUnaryClientInterceptor, + grpc_aio.UnaryStreamClientInterceptor, + grpc_aio.StreamUnaryClientInterceptor, + grpc_aio.StreamStreamClientInterceptor, +] + + +def get_grpc_aio_channel( + host_address: Optional[str], + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None, + options: Optional[ChannelArgumentType] = None, +) -> grpc_aio.Channel: + """create a grpc asyncio channel + + Args: + host_address: The host address of the gRPC server. If None, uses the default address. + secure_channel: Whether to use a secure channel (TLS/SSL). Defaults to False. + interceptors: Optional sequence of client interceptors to apply to the channel. + options: Optional sequence of gRPC channel options as (key, value) tuples. 
Keys defined in https://grpc.github.io/grpc/core/group__grpc__arg__keys.html + """ + if host_address is None: + host_address = get_default_host_address() + + for protocol in SECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = True + host_address = host_address[len(protocol) :] + break + + for protocol in INSECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = False + host_address = host_address[len(protocol) :] + break + + if secure_channel: + channel = grpc_aio.secure_channel( + host_address, grpc.ssl_channel_credentials(), interceptors=interceptors, options=options + ) + else: + channel = grpc_aio.insecure_channel( + host_address, interceptors=interceptors, options=options + ) + + return channel diff --git a/durabletask/client.py b/durabletask/client.py index 82f920ad..06faf850 100644 --- a/durabletask/client.py +++ b/durabletask/client.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from datetime import datetime from enum import Enum -from typing import Any, List, Tuple, TypeVar, Union +from typing import Any, Optional, Sequence, TypeVar, Union import grpc from google.protobuf import wrappers_pb2 @@ -16,13 +16,15 @@ import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared from durabletask import task +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl -TInput = TypeVar('TInput') -TOutput = TypeVar('TOutput') +TInput = TypeVar("TInput") +TOutput = TypeVar("TOutput") class OrchestrationStatus(Enum): """The status of an orchestration instance.""" + RUNNING = pb.ORCHESTRATION_STATUS_RUNNING COMPLETED = pb.ORCHESTRATION_STATUS_COMPLETED FAILED = pb.ORCHESTRATION_STATUS_FAILED @@ -30,6 +32,8 @@ class OrchestrationStatus(Enum): CONTINUED_AS_NEW = pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW PENDING = pb.ORCHESTRATION_STATUS_PENDING SUSPENDED = pb.ORCHESTRATION_STATUS_SUSPENDED + CANCELED = pb.ORCHESTRATION_STATUS_CANCELED + STALLED = pb.ORCHESTRATION_STATUS_STALLED def __str__(self): return helpers.get_orchestration_status_str(self.value) @@ -42,16 +46,17 @@ class OrchestrationState: runtime_status: OrchestrationStatus created_at: datetime last_updated_at: datetime - serialized_input: Union[str, None] - serialized_output: Union[str, None] - serialized_custom_status: Union[str, None] - failure_details: Union[task.FailureDetails, None] + serialized_input: Optional[str] + serialized_output: Optional[str] + serialized_custom_status: Optional[str] + failure_details: Optional[task.FailureDetails] def raise_if_failed(self): if self.failure_details is not None: raise OrchestrationFailedError( f"Orchestration '{self.instance_id}' failed: {self.failure_details.message}", - self.failure_details) + self.failure_details, + ) class OrchestrationFailedError(Exception): @@ -64,18 +69,23 @@ def failure_details(self): return self._failure_details -def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Union[OrchestrationState, None]: +def new_orchestration_state( + instance_id: str, res: pb.GetInstanceResponse +) -> Optional[OrchestrationState]: if not res.exists: return None state = res.orchestrationState failure_details = None - if state.failureDetails.errorMessage != '' or state.failureDetails.errorType != '': + if state.failureDetails.errorMessage != "" or state.failureDetails.errorType != "": failure_details = task.FailureDetails( state.failureDetails.errorMessage, state.failureDetails.errorType, - state.failureDetails.stackTrace.value if 
not helpers.is_empty(state.failureDetails.stackTrace) else None) + state.failureDetails.stackTrace.value + if not helpers.is_empty(state.failureDetails.stackTrace) + else None, + ) return OrchestrationState( instance_id, @@ -86,54 +96,106 @@ def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Un state.input.value if not helpers.is_empty(state.input) else None, state.output.value if not helpers.is_empty(state.output) else None, state.customStatus.value if not helpers.is_empty(state.customStatus) else None, - failure_details) + failure_details, + ) class TaskHubGrpcClient: - - def __init__(self, *, - host_address: Union[str, None] = None, - metadata: Union[List[Tuple[str, str]], None] = None, - log_handler = None, - log_formatter: Union[logging.Formatter, None] = None, - secure_channel: bool = False): - channel = shared.get_grpc_channel(host_address, metadata, secure_channel=secure_channel) + def __init__( + self, + *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, + channel_options: Optional[Sequence[tuple[str, Any]]] = None, + ): + # If the caller provided metadata, we need to create a new interceptor for it and + # add it to the list of interceptors. + if interceptors is not None: + interceptors = list(interceptors) + if metadata is not None: + interceptors.append(DefaultClientInterceptorImpl(metadata)) + elif metadata is not None: + interceptors = [DefaultClientInterceptorImpl(metadata)] + else: + interceptors = None + + channel = shared.get_grpc_channel( + host_address=host_address, + secure_channel=secure_channel, + interceptors=interceptors, + options=channel_options, + ) + self._channel = channel self._stub = stubs.TaskHubSidecarServiceStub(channel) self._logger = shared.get_logger("client", log_handler, log_formatter) - def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, - input: Union[TInput, None] = None, - instance_id: Union[str, None] = None, - start_at: Union[datetime, None] = None, - reuse_id_policy: Union[pb.OrchestrationIdReusePolicy, None] = None) -> str: + def __enter__(self): + return self + def __exit__(self, exc_type, exc, tb): + try: + self.close() + finally: + return False + + def close(self) -> None: + """Close the underlying gRPC channel.""" + try: + # grpc.Channel.close() is idempotent + self._channel.close() + except Exception: + # Best-effort cleanup + pass + + def schedule_new_orchestration( + self, + orchestrator: Union[task.Orchestrator[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + start_at: Optional[datetime] = None, + reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None, + ) -> str: name = orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) + input_pb = ( + wrappers_pb2.StringValue(value=shared.to_json(input)) if input is not None else None + ) + req = pb.CreateInstanceRequest( name=name, instanceId=instance_id if instance_id else uuid.uuid4().hex, - input=wrappers_pb2.StringValue(value=shared.to_json(input)) if input is not None else None, + input=input_pb, scheduledStartTimestamp=helpers.new_timestamp(start_at) if start_at else None, version=wrappers_pb2.StringValue(value=""), orchestrationIdReusePolicy=reuse_id_policy, - ) + ) 
self._logger.info(f"Starting new '{name}' instance with ID = '{req.instanceId}'.") res: pb.CreateInstanceResponse = self._stub.StartInstance(req) return res.instanceId - def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> Union[OrchestrationState, None]: + def get_orchestration_state( + self, instance_id: str, *, fetch_payloads: bool = True + ) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) res: pb.GetInstanceResponse = self._stub.GetInstance(req) return new_orchestration_state(req.instanceId, res) - def wait_for_orchestration_start(self, instance_id: str, *, - fetch_payloads: bool = False, - timeout: int = 60) -> Union[OrchestrationState, None]: + def wait_for_orchestration_start( + self, instance_id: str, *, fetch_payloads: bool = False, timeout: int = 0 + ) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: - self._logger.info(f"Waiting up to {timeout}s for instance '{instance_id}' to start.") - res: pb.GetInstanceResponse = self._stub.WaitForInstanceStart(req, timeout=timeout) + grpc_timeout = None if timeout == 0 else timeout + self._logger.info( + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to start." + ) + res: pb.GetInstanceResponse = self._stub.WaitForInstanceStart(req, timeout=grpc_timeout) return new_orchestration_state(req.instanceId, res) except grpc.RpcError as rpc_error: if rpc_error.code() == grpc.StatusCode.DEADLINE_EXCEEDED: # type: ignore @@ -142,20 +204,30 @@ def wait_for_orchestration_start(self, instance_id: str, *, else: raise - def wait_for_orchestration_completion(self, instance_id: str, *, - fetch_payloads: bool = True, - timeout: int = 60) -> Union[OrchestrationState, None]: + def wait_for_orchestration_completion( + self, instance_id: str, *, fetch_payloads: bool = True, timeout: int = 0 + ) -> Optional[OrchestrationState]: req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) try: - self._logger.info(f"Waiting {timeout}s for instance '{instance_id}' to complete.") - res: pb.GetInstanceResponse = self._stub.WaitForInstanceCompletion(req, timeout=timeout) + grpc_timeout = None if timeout == 0 else timeout + self._logger.info( + f"Waiting {'indefinitely' if timeout == 0 else f'up to {timeout}s'} for instance '{instance_id}' to complete." 
+ ) + res: pb.GetInstanceResponse = self._stub.WaitForInstanceCompletion( + req, timeout=grpc_timeout + ) state = new_orchestration_state(req.instanceId, res) if not state: return None - if state.runtime_status == OrchestrationStatus.FAILED and state.failure_details is not None: + if ( + state.runtime_status == OrchestrationStatus.FAILED + and state.failure_details is not None + ): details = state.failure_details - self._logger.info(f"Instance '{instance_id}' failed: [{details.error_type}] {details.message}") + self._logger.info( + f"Instance '{instance_id}' failed: [{details.error_type}] {details.message}" + ) elif state.runtime_status == OrchestrationStatus.TERMINATED: self._logger.info(f"Instance '{instance_id}' was terminated.") elif state.runtime_status == OrchestrationStatus.COMPLETED: @@ -169,23 +241,26 @@ def wait_for_orchestration_completion(self, instance_id: str, *, else: raise - def raise_orchestration_event(self, instance_id: str, event_name: str, *, - data: Union[Any, None] = None): + def raise_orchestration_event( + self, instance_id: str, event_name: str, *, data: Optional[Any] = None + ): req = pb.RaiseEventRequest( instanceId=instance_id, name=event_name, - input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data else None) + input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data else None, + ) self._logger.info(f"Raising event '{event_name}' for instance '{instance_id}'.") self._stub.RaiseEvent(req) - def terminate_orchestration(self, instance_id: str, *, - output: Union[Any, None] = None, - recursive: bool = True): + def terminate_orchestration( + self, instance_id: str, *, output: Optional[Any] = None, recursive: bool = True + ): req = pb.TerminateRequest( instanceId=instance_id, output=wrappers_pb2.StringValue(value=shared.to_json(output)) if output else None, - recursive=recursive) + recursive=recursive, + ) self._logger.info(f"Terminating instance '{instance_id}'.") self._stub.TerminateInstance(req) @@ -203,4 +278,4 @@ def resume_orchestration(self, instance_id: str): def purge_orchestration(self, instance_id: str, recursive: bool = True): req = pb.PurgeInstancesRequest(instanceId=instance_id, recursive=recursive) self._logger.info(f"Purging instance '{instance_id}'.") - self._stub.PurgeInstances() + self._stub.PurgeInstances(req) diff --git a/durabletask/deterministic.py b/durabletask/deterministic.py new file mode 100644 index 00000000..29437833 --- /dev/null +++ b/durabletask/deterministic.py @@ -0,0 +1,224 @@ +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +""" +Deterministic utilities for Durable Task workflows (async and generator). + +This module provides deterministic alternatives to non-deterministic Python +functions, ensuring workflow replay consistency across different executions. +It is shared by both the asyncio authoring model and the generator-based model. 
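+
+Example (illustrative):
+
+    from datetime import datetime, timezone
+
+    rnd = deterministic_random("instance-1", datetime(2024, 1, 1, tzinfo=timezone.utc))
+    rnd.randint(0, 10)  # returns the same value on every replay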
+""" + +import hashlib +import random +import string as _string +import uuid +from collections.abc import Sequence +from dataclasses import dataclass +from datetime import datetime, timedelta +from typing import Optional, TypeVar + + +@dataclass +class DeterminismSeed: + """Seed data for deterministic operations.""" + + instance_id: str + orchestration_unix_ts: int + + def to_int(self) -> int: + """Convert seed to integer for PRNG initialization.""" + combined = f"{self.instance_id}:{self.orchestration_unix_ts}" + hash_bytes = hashlib.sha256(combined.encode("utf-8")).digest() + return int.from_bytes(hash_bytes[:8], byteorder="big") + + +def derive_seed(instance_id: str, orchestration_time: datetime) -> int: + """ + Derive a deterministic seed from instance ID and orchestration time. + """ + ts = int(orchestration_time.timestamp()) + return DeterminismSeed(instance_id=instance_id, orchestration_unix_ts=ts).to_int() + + +def deterministic_random(instance_id: str, orchestration_time: datetime) -> random.Random: + """ + Create a deterministic random number generator. + """ + seed = derive_seed(instance_id, orchestration_time) + return random.Random(seed) + + +def deterministic_uuid4(rnd: random.Random) -> uuid.UUID: + """ + Generate a deterministic UUID4 using the provided random generator. + + Note: This is deprecated in favor of deterministic_uuid_v5 which matches + the .NET implementation for cross-language compatibility. + """ + bytes_ = bytes(rnd.randrange(0, 256) for _ in range(16)) + bytes_list = list(bytes_) + bytes_list[6] = (bytes_list[6] & 0x0F) | 0x40 # Version 4 + bytes_list[8] = (bytes_list[8] & 0x3F) | 0x80 # Variant bits + return uuid.UUID(bytes=bytes(bytes_list)) + + +def deterministic_uuid_v5(instance_id: str, current_datetime: datetime, counter: int) -> uuid.UUID: + """ + Generate a deterministic UUID v5 matching the .NET implementation. + + This implementation matches the durabletask-dotnet NewGuid() method: + https://github.com/microsoft/durabletask-dotnet/blob/main/src/Worker/Core/Shims/TaskOrchestrationContextWrapper.cs + + Args: + instance_id: The orchestration instance ID. + current_datetime: The current orchestration datetime (frozen during replay). + counter: The per-call counter (starts at 0 on each replay). + + Returns: + A deterministic UUID v5 that will be the same across replays. + """ + # DNS namespace UUID - same as .NET DnsNamespaceValue + namespace = uuid.UUID("9e952958-5e33-4daf-827f-2fa12937b875") + + # Build name matching .NET format: instanceId_datetime_counter + # Using isoformat() which produces ISO 8601 format similar to .NET's ToString("o") + name = f"{instance_id}_{current_datetime.isoformat()}_{counter}" + + # Generate UUID v5 (SHA-1 based, matching .NET) + return uuid.uuid5(namespace, name) + + +class DeterministicContextMixin: + """ + Mixin providing deterministic helpers for workflow contexts. + + Assumes the inheriting class exposes `instance_id` and `current_utc_datetime` attributes. + + This implementation matches the .NET durabletask SDK approach with an explicit + counter for UUID generation that resets on each replay. 
+ """ + + def __init__(self, *args, **kwargs): + """Initialize the mixin with UUID and timestamp counters.""" + super().__init__(*args, **kwargs) + # Counter for deterministic UUID generation (matches .NET newGuidCounter) + # This counter resets to 0 on each replay, ensuring determinism + self._uuid_counter: int = 0 + # Counter for deterministic timestamp sequencing (resets on replay) + self._timestamp_counter: int = 0 + + def now(self) -> datetime: + """Alias for deterministic current_utc_datetime.""" + return self.current_utc_datetime # type: ignore[attr-defined] + + def random(self) -> random.Random: + """Return a PRNG seeded deterministically from instance id and orchestration time.""" + rnd = deterministic_random( + self.instance_id, # type: ignore[attr-defined] + self.current_utc_datetime, # type: ignore[attr-defined] + ) + # Mark as deterministic for asyncio sandbox detector whitelisting of bound methods (randint, random) + try: + rnd._dt_deterministic = True + except Exception: + pass + return rnd + + def uuid4(self) -> uuid.UUID: + """ + Return a deterministically generated UUID v5 with explicit counter. + https://www.sohamkamani.com/uuid-versions-explained/#v5-non-random-uuids + + This matches the .NET implementation's NewGuid() method which uses: + - Instance ID + - Current UTC datetime (frozen during replay) + - Per-call counter (resets to 0 on each replay) + + The counter ensures multiple calls produce different UUIDs while maintaining + determinism across replays. + """ + # Lazily initialize counter if not set by __init__ (for compatibility) + if not hasattr(self, "_uuid_counter"): + self._uuid_counter = 0 + + result = deterministic_uuid_v5( + self.instance_id, # type: ignore[attr-defined] + self.current_utc_datetime, # type: ignore[attr-defined] + self._uuid_counter, + ) + self._uuid_counter += 1 + return result + + def new_guid(self) -> uuid.UUID: + """Alias for uuid4 for API parity with other SDKs.""" + return self.uuid4() + + def random_string(self, length: int, *, alphabet: Optional[str] = None) -> str: + """Return a deterministically generated random string of the given length.""" + if length < 0: + raise ValueError("length must be non-negative") + chars = alphabet if alphabet is not None else (_string.ascii_letters + _string.digits) + if not chars: + raise ValueError("alphabet must not be empty") + rnd = self.random() + size = len(chars) + return "".join(chars[rnd.randrange(0, size)] for _ in range(length)) + + def random_int(self, min_value: int = 0, max_value: int = 2**31 - 1) -> int: + """Return a deterministic random integer in the specified range.""" + if min_value > max_value: + raise ValueError("min_value must be <= max_value") + rnd = self.random() + return rnd.randint(min_value, max_value) + + T = TypeVar("T") + + def random_choice(self, sequence: Sequence[T]) -> T: + """Return a deterministic random element from a non-empty sequence.""" + if not sequence: + raise IndexError("Cannot choose from empty sequence") + rnd = self.random() + return rnd.choice(sequence) + + def now_with_sequence(self) -> datetime: + """ + Return deterministic timestamp with microsecond increment per call. + + Each call returns: current_utc_datetime + (counter * 1 microsecond) + + This provides ordered, unique timestamps for tracing/telemetry while maintaining + determinism across replays. The counter resets to 0 on each replay (similar to + _uuid_counter pattern). + + Perfect for preserving event ordering within a workflow without requiring activities. 
+ + Returns: + datetime: Deterministic timestamp that increments on each call + + Example: + ```python + def workflow(ctx): + t1 = ctx.now_with_sequence() # 2024-01-01 12:00:00.000000 + result = yield ctx.call_activity(some_activity, input="data") + t2 = ctx.now_with_sequence() # 2024-01-01 12:00:00.000001 + # t1 < t2, preserving order for telemetry + ``` + """ + offset = timedelta(microseconds=self._timestamp_counter) + self._timestamp_counter += 1 + return self.current_utc_datetime + offset # type: ignore[attr-defined] + + def current_utc_datetime_with_sequence(self): + """Alias for now_with_sequence for API parity with other SDKs.""" + return self.now_with_sequence() diff --git a/durabletask/internal/PROTO_SOURCE_COMMIT_HASH b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH new file mode 100644 index 00000000..1abbaf13 --- /dev/null +++ b/durabletask/internal/PROTO_SOURCE_COMMIT_HASH @@ -0,0 +1 @@ +889781bbe90e6ec84ebe169978c4f2fd0df74ff0 diff --git a/durabletask/internal/grpc_interceptor.py b/durabletask/internal/grpc_interceptor.py index 5b12ace0..f9e8fb59 100644 --- a/durabletask/internal/grpc_interceptor.py +++ b/durabletask/internal/grpc_interceptor.py @@ -2,50 +2,59 @@ # Licensed under the MIT License. from collections import namedtuple -from typing import List, Tuple import grpc class _ClientCallDetails( - namedtuple( - '_ClientCallDetails', - ['method', 'timeout', 'metadata', 'credentials', 'wait_for_ready', 'compression']), - grpc.ClientCallDetails): + namedtuple( + "_ClientCallDetails", + ["method", "timeout", "metadata", "credentials", "wait_for_ready", "compression"], + ), + grpc.ClientCallDetails, +): """This is an implementation of the ClientCallDetails interface needed for interceptors. This class takes six named values and inherits the ClientCallDetails from grpc package. This class encloses the values that describe a RPC to be invoked. 
""" + pass -class DefaultClientInterceptorImpl ( - grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor, - grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor): +class DefaultClientInterceptorImpl( + grpc.UnaryUnaryClientInterceptor, + grpc.UnaryStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, + grpc.StreamStreamClientInterceptor, +): """The class implements a UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, - StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an + StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an interceptor to add additional headers to all calls as needed.""" - def __init__(self, metadata: List[Tuple[str, str]]): + def __init__(self, metadata: list[tuple[str, str]]): super().__init__() self._metadata = metadata - def _intercept_call( - self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: + def _intercept_call(self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: """Internal intercept_call implementation which adds metadata to grpc metadata in the RPC - call details.""" + call details.""" if self._metadata is None: return client_call_details - + if client_call_details.metadata is not None: metadata = list(client_call_details.metadata) else: metadata = [] - + metadata.extend(self._metadata) client_call_details = _ClientCallDetails( - client_call_details.method, client_call_details.timeout, metadata, - client_call_details.credentials, client_call_details.wait_for_ready, client_call_details.compression) + client_call_details.method, + client_call_details.timeout, + metadata, + client_call_details.credentials, + client_call_details.wait_for_ready, + client_call_details.compression, + ) return client_call_details diff --git a/durabletask/internal/helpers.py b/durabletask/internal/helpers.py index c7354e52..3f04728a 100644 --- a/durabletask/internal/helpers.py +++ b/durabletask/internal/helpers.py @@ -3,7 +3,7 @@ import traceback from datetime import datetime -from typing import List, Union +from typing import Optional from google.protobuf import timestamp_pb2, wrappers_pb2 @@ -12,21 +12,27 @@ # TODO: The new_xxx_event methods are only used by test code and should be moved elsewhere -def new_orchestrator_started_event(timestamp: Union[datetime, None] = None) -> pb.HistoryEvent: +def new_orchestrator_started_event(timestamp: Optional[datetime] = None) -> pb.HistoryEvent: ts = timestamp_pb2.Timestamp() if timestamp is not None: ts.FromDatetime(timestamp) - return pb.HistoryEvent(eventId=-1, timestamp=ts, orchestratorStarted=pb.OrchestratorStartedEvent()) + return pb.HistoryEvent( + eventId=-1, timestamp=ts, orchestratorStarted=pb.OrchestratorStartedEvent() + ) -def new_execution_started_event(name: str, instance_id: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_execution_started_event( + name: str, instance_id: str, encoded_input: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), executionStarted=pb.ExecutionStartedEvent( name=name, input=get_string_value(encoded_input), - orchestrationInstance=pb.OrchestrationInstance(instanceId=instance_id))) + orchestrationInstance=pb.OrchestrationInstance(instanceId=instance_id), + ), + ) def new_timer_created_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent: @@ -35,7 +41,7 @@ def new_timer_created_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent return pb.HistoryEvent( 
eventId=timer_id, timestamp=timestamp_pb2.Timestamp(), - timerCreated=pb.TimerCreatedEvent(fireAt=ts) + timerCreated=pb.TimerCreatedEvent(fireAt=ts), ) @@ -45,23 +51,29 @@ def new_timer_fired_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), - timerFired=pb.TimerFiredEvent(fireAt=ts, timerId=timer_id) + timerFired=pb.TimerFiredEvent(fireAt=ts, timerId=timer_id), ) -def new_task_scheduled_event(event_id: int, name: str, encoded_input: Union[str, None] = None) -> pb.HistoryEvent: +def new_task_scheduled_event( + event_id: int, name: str, encoded_input: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), - taskScheduled=pb.TaskScheduledEvent(name=name, input=get_string_value(encoded_input)) + taskScheduled=pb.TaskScheduledEvent(name=name, input=get_string_value(encoded_input)), ) -def new_task_completed_event(event_id: int, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_task_completed_event( + event_id: int, encoded_output: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), - taskCompleted=pb.TaskCompletedEvent(taskScheduledId=event_id, result=get_string_value(encoded_output)) + taskCompleted=pb.TaskCompletedEvent( + taskScheduledId=event_id, result=get_string_value(encoded_output) + ), ) @@ -69,32 +81,33 @@ def new_task_failed_event(event_id: int, ex: Exception) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), - taskFailed=pb.TaskFailedEvent(taskScheduledId=event_id, failureDetails=new_failure_details(ex)) + taskFailed=pb.TaskFailedEvent( + taskScheduledId=event_id, failureDetails=new_failure_details(ex) + ), ) def new_sub_orchestration_created_event( - event_id: int, - name: str, - instance_id: str, - encoded_input: Union[str, None] = None) -> pb.HistoryEvent: + event_id: int, name: str, instance_id: str, encoded_input: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=event_id, timestamp=timestamp_pb2.Timestamp(), subOrchestrationInstanceCreated=pb.SubOrchestrationInstanceCreatedEvent( - name=name, - input=get_string_value(encoded_input), - instanceId=instance_id) + name=name, input=get_string_value(encoded_input), instanceId=instance_id + ), ) -def new_sub_orchestration_completed_event(event_id: int, encoded_output: Union[str, None] = None) -> pb.HistoryEvent: +def new_sub_orchestration_completed_event( + event_id: int, encoded_output: Optional[str] = None +) -> pb.HistoryEvent: return pb.HistoryEvent( eventId=-1, timestamp=timestamp_pb2.Timestamp(), subOrchestrationInstanceCompleted=pb.SubOrchestrationInstanceCompletedEvent( - result=get_string_value(encoded_output), - taskScheduledId=event_id) + result=get_string_value(encoded_output), taskScheduledId=event_id + ), ) @@ -103,8 +116,8 @@ def new_sub_orchestration_failed_event(event_id: int, ex: Exception) -> pb.Histo eventId=-1, timestamp=timestamp_pb2.Timestamp(), subOrchestrationInstanceFailed=pb.SubOrchestrationInstanceFailedEvent( - failureDetails=new_failure_details(ex), - taskScheduledId=event_id) + failureDetails=new_failure_details(ex), taskScheduledId=event_id + ), ) @@ -112,15 +125,15 @@ def new_failure_details(ex: Exception) -> pb.TaskFailureDetails: return pb.TaskFailureDetails( errorType=type(ex).__name__, errorMessage=str(ex), - stackTrace=wrappers_pb2.StringValue(value=''.join(traceback.format_tb(ex.__traceback__))) 
@@ -128,29 +141,25 @@ def new_suspend_event() -> pb.HistoryEvent:
     return pb.HistoryEvent(
         eventId=-1,
         timestamp=timestamp_pb2.Timestamp(),
-        executionSuspended=pb.ExecutionSuspendedEvent()
+        executionSuspended=pb.ExecutionSuspendedEvent(),
     )


 def new_resume_event() -> pb.HistoryEvent:
     return pb.HistoryEvent(
-        eventId=-1,
-        timestamp=timestamp_pb2.Timestamp(),
-        executionResumed=pb.ExecutionResumedEvent()
+        eventId=-1, timestamp=timestamp_pb2.Timestamp(), executionResumed=pb.ExecutionResumedEvent()
     )


-def new_terminated_event(*, encoded_output: Union[str, None] = None) -> pb.HistoryEvent:
+def new_terminated_event(*, encoded_output: Optional[str] = None) -> pb.HistoryEvent:
     return pb.HistoryEvent(
         eventId=-1,
         timestamp=timestamp_pb2.Timestamp(),
-        executionTerminated=pb.ExecutionTerminatedEvent(
-            input=get_string_value(encoded_output)
-        )
+        executionTerminated=pb.ExecutionTerminatedEvent(input=get_string_value(encoded_output)),
     )


-def get_string_value(val: Union[str, None]) -> Union[wrappers_pb2.StringValue, None]:
+def get_string_value(val: Optional[str]) -> Optional[wrappers_pb2.StringValue]:
     if val is None:
         return None
     else:
@@ -158,18 +167,34 @@ def get_string_value(val: Union[str, None]) -> Union[wrappers_pb2.StringValue, N


 def new_complete_orchestration_action(
-        id: int,
-        status: pb.OrchestrationStatus,
-        result: Union[str, None] = None,
-        failure_details: Union[pb.TaskFailureDetails, None] = None,
-        carryover_events: Union[List[pb.HistoryEvent], None] = None) -> pb.OrchestratorAction:
+    id: int,
+    status: pb.OrchestrationStatus,
+    result: Optional[str] = None,
+    failure_details: Optional[pb.TaskFailureDetails] = None,
+    carryover_events: Optional[list[pb.HistoryEvent]] = None,
+    router: Optional[pb.TaskRouter] = None,
+) -> pb.OrchestratorAction:
     completeOrchestrationAction = pb.CompleteOrchestrationAction(
         orchestrationStatus=status,
         result=get_string_value(result),
         failureDetails=failure_details,
-        carryoverEvents=carryover_events)
+        carryoverEvents=carryover_events,
+    )
+
+    return pb.OrchestratorAction(
+        id=id,
+        completeOrchestration=completeOrchestrationAction,
+        router=router,
+    )
+
-    return pb.OrchestratorAction(id=id, completeOrchestration=completeOrchestrationAction)
+def new_orchestrator_version_not_available_action(
+    id: int,
+) -> pb.OrchestratorAction:
+    return pb.OrchestratorAction(
+        id=id,
+        orchestratorVersionNotAvailable=pb.OrchestratorVersionNotAvailableAction(),
+    )


 def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction:
@@ -178,11 +203,18 @@ def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction
     return pb.OrchestratorAction(id=id, createTimer=pb.CreateTimerAction(fireAt=timestamp))


-def new_schedule_task_action(id: int, name: str, encoded_input: Union[str, None]) -> pb.OrchestratorAction:
-    return pb.OrchestratorAction(id=id, scheduleTask=pb.ScheduleTaskAction(
-        name=name,
-        input=get_string_value(encoded_input)
-    ))
+def new_schedule_task_action(
+    id: int, name: str, encoded_input: Optional[str], router: Optional[pb.TaskRouter] = None
+) -> pb.OrchestratorAction:
+    return pb.OrchestratorAction(
+        id=id,
+        scheduleTask=pb.ScheduleTaskAction(
+            name=name,
+            input=get_string_value(encoded_input),
+            router=router,
+        ),
+        router=router,
+    )


 def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp:
@@ -192,25 +224,32 @@ def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp:


 def new_create_sub_orchestration_action(
-        id: int,
-        name: str,
-        instance_id: Union[str, None],
-        encoded_input: Union[str, None]) -> pb.OrchestratorAction:
-    return pb.OrchestratorAction(id=id, createSubOrchestration=pb.CreateSubOrchestrationAction(
-        name=name,
-        instanceId=instance_id,
-        input=get_string_value(encoded_input)
-    ))
+    id: int,
+    name: str,
+    instance_id: Optional[str],
+    encoded_input: Optional[str],
+    router: Optional[pb.TaskRouter] = None,
+) -> pb.OrchestratorAction:
+    return pb.OrchestratorAction(
+        id=id,
+        createSubOrchestration=pb.CreateSubOrchestrationAction(
+            name=name,
+            instanceId=instance_id,
+            input=get_string_value(encoded_input),
+            router=router,
+        ),
+        router=router,
+    )


 def is_empty(v: wrappers_pb2.StringValue):
-    return v is None or v.value == ''
+    return v is None or v.value == ""


 def get_orchestration_status_str(status: pb.OrchestrationStatus):
     try:
         const_name = pb.OrchestrationStatus.Name(status)
-        if const_name.startswith('ORCHESTRATION_STATUS_'):
-            return const_name[len('ORCHESTRATION_STATUS_'):]
+        if const_name.startswith("ORCHESTRATION_STATUS_"):
+            return const_name[len("ORCHESTRATION_STATUS_") :]
     except Exception:
         return "UNKNOWN"
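# --- [Editor's aside; illustrative sketch, not part of the diff] ---
# A quick tour of the helpers above, assuming the module layout shown in this
# diff; the TaskRouter field names come from the regenerated proto below, and
# the app IDs and task name are made up.
import durabletask.internal.orchestrator_service_pb2 as pb

# get_string_value wraps non-None strings so proto3 can tell "unset" from "".
assert get_string_value(None) is None
assert is_empty(get_string_value(""))

# Enum names are returned without their ORCHESTRATION_STATUS_ prefix.
assert get_orchestration_status_str(pb.ORCHESTRATION_STATUS_COMPLETED) == "COMPLETED"

# The new optional router threads a source/target app pair through the action.
router = pb.TaskRouter(sourceAppID="app-a", targetAppID="app-b")
action = new_schedule_task_action(1, "process_payment", encoded_input=None, router=router)
assert action.scheduleTask.router.targetAppID == "app-b"
# --- [End aside] ---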
diff --git a/durabletask/internal/orchestrator_service_pb2.py b/durabletask/internal/orchestrator_service_pb2.py
index 6ee3bbb5..93155725 100644
--- a/durabletask/internal/orchestrator_service_pb2.py
+++ b/durabletask/internal/orchestrator_service_pb2.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 # Generated by the protocol buffer compiler. DO NOT EDIT!
# NO CHECKED-IN PROTOBUF GENCODE -# source: orchestrator_service.proto -# Protobuf Python Version: 5.27.2 +# source: durabletask/internal/orchestrator_service.proto +# Protobuf Python Version: 6.31.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool @@ -11,11 +11,11 @@ from google.protobuf.internal import builder as _builder _runtime_version.ValidateProtobufRuntimeVersion( _runtime_version.Domain.PUBLIC, - 5, - 27, - 2, + 6, + 31, + 1, '', - 'orchestrator_service.proto' + 'durabletask/internal/orchestrator_service.proto' ) # @@protoc_insertion_point(imports) @@ -28,196 +28,266 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1aorchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\x91\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 
\x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r 
\x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x84\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 
\x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x15\n\x13GetWorkItemsRequest\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 
\n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"K\n\nTaskRouter\x12\x13\n\x0bsourceAppID\x18\x01 \x01(\t\x12\x18\n\x0btargetAppID\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_targetAppID\"C\n\x14OrchestrationVersion\x12\x0f\n\x07patches\x18\x01 \x03(\t\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_name\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\x02\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\x12\x17\n\x0ftaskExecutionId\x18\x07 \x01(\t\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xdd\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x12\n\x05\x61ppID\x18\x05 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_appID\"-\n\x17RerunParentInstanceInfo\x12\x12\n\ninstanceID\x18\x01 \x01(\t\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xe5\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\t \x03(\x0b\x32 .ExecutionStartedEvent.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\x9e\x02\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\x12\x17\n\x0ftaskExecutionId\x18\x05 \x01(\t\x12>\n\x17rerunParentInstanceInfo\x18\x06 \x01(\x0b\x32\x18.RerunParentInstanceInfoH\x00\x88\x01\x01\x42\x1a\n\x18_rerunParentInstanceInfo\"t\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0ftaskExecutionId\x18\x03 \x01(\t\"p\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0ftaskExecutionId\x18\x03 \x01(\t\"\xab\x02\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\x12>\n\x17rerunParentInstanceInfo\x18\x06 
\x01(\x0b\x32\x18.RerunParentInstanceInfoH\x00\x88\x01\x01\x42\x1a\n\x18_rerunParentInstanceInfo\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xb7\x01\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x12>\n\x17rerunParentInstanceInfo\x18\x03 \x01(\x0b\x32\x18.RerunParentInstanceInfoH\x01\x88\x01\x01\x42\x07\n\x05_nameB\x1a\n\x18_rerunParentInstanceInfo\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"S\n\x18OrchestratorStartedEvent\x12+\n\x07version\x18\x01 \x01(\x0b\x32\x15.OrchestrationVersionH\x00\x88\x01\x01\x42\n\n\x08_version\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"a\n\x15\x45xecutionStalledEvent\x12\x1e\n\x06reason\x18\x01 \x01(\x0e\x32\x0e.StalledReason\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_description\"\xdc\x01\n\x1c\x45ntityOperationSignaledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xcb\x02\n\x1a\x45ntityOperationCalledEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x31\n\rscheduledTime\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10parentInstanceId\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x11parentExecutionId\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x90\x01\n\x18\x45ntityLockRequestedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\x12\x0f\n\x07lockSet\x18\x02 \x03(\t\x12\x10\n\x08position\x18\x03 \x01(\x05\x12\x36\n\x10parentInstanceId\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"`\n\x1d\x45ntityOperationCompletedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\\\n\x1a\x45ntityOperationFailedEvent\x12\x11\n\trequestId\x18\x01 \x01(\t\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xa2\x01\n\x15\x45ntityUnlockSentEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 
\x01(\t\x12\x36\n\x10parentInstanceId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10targetInstanceId\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x16\x45ntityLockGrantedEvent\x12\x19\n\x11\x63riticalSectionId\x18\x01 \x01(\t\"\x8d\r\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 \x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x12@\n\x17\x65ntityOperationSignaled\x18\x17 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x18 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x42\n\x18\x65ntityOperationCompleted\x18\x19 \x01(\x0b\x32\x1e.EntityOperationCompletedEventH\x00\x12<\n\x15\x65ntityOperationFailed\x18\x1a \x01(\x0b\x32\x1b.EntityOperationFailedEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x1b \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x34\n\x11\x65ntityLockGranted\x18\x1c \x01(\x0b\x32\x17.EntityLockGrantedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x1d \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x12\x32\n\x10\x65xecutionStalled\x18\x1f \x01(\x0b\x32\x16.ExecutionStalledEventH\x00\x12 \n\x06router\x18\x1e \x01(\x0b\x32\x0b.TaskRouterH\x01\x88\x01\x01\x42\x0b\n\teventTypeB\t\n\x07_router\"\xc4\x01\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12 \n\x06router\x18\x04 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x12\x17\n\x0ftaskExecutionId\x18\x05 \x01(\tB\t\n\x07_router\"\xc9\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12 \n\x06router\x18\x05 
\x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x42\t\n\x07_router\"[\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\x04name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_name\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\x9c\x02\n\x17SendEntityMessageAction\x12@\n\x17\x65ntityOperationSignaled\x18\x01 \x01(\x0b\x32\x1d.EntityOperationSignaledEventH\x00\x12<\n\x15\x65ntityOperationCalled\x18\x02 \x01(\x0b\x32\x1b.EntityOperationCalledEventH\x00\x12\x38\n\x13\x65ntityLockRequested\x18\x03 \x01(\x0b\x32\x19.EntityLockRequestedEventH\x00\x12\x32\n\x10\x65ntityUnlockSent\x18\x04 \x01(\x0b\x32\x16.EntityUnlockSentEventH\x00\x42\x13\n\x11\x45ntityMessageType\"\'\n%OrchestratorVersionNotAvailableAction\"\xb1\x04\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 \x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x12\x35\n\x11sendEntityMessage\x18\x08 \x01(\x0b\x32\x18.SendEntityMessageActionH\x00\x12Q\n\x1forchestratorVersionNotAvailable\x18\n \x01(\x0b\x32&.OrchestratorVersionNotAvailableActionH\x00\x12 \n\x06router\x18\t \x01(\x0b\x32\x0b.TaskRouterH\x01\x88\x01\x01\x42\x18\n\x16orchestratorActionTypeB\t\n\x07_router\"\xa9\x02\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\x12 \n\x18requiresHistoryStreaming\x18\x06 \x01(\x08\x12 \n\x06router\x18\x07 \x01(\x0b\x32\x0b.TaskRouterH\x00\x88\x01\x01\x42\t\n\x07_router\"\x8f\x02\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\x12\x37\n\x12numEventsProcessed\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12+\n\x07version\x18\x06 \x01(\x0b\x32\x15.OrchestrationVersionH\x00\x88\x01\x01\x42\n\n\x08_version\"\xce\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 
\x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x12)\n\x12parentTraceContext\x18\t \x01(\x0b\x32\r.TraceContext\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xfe\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x04tags\x18\x0f \x03(\x0b\x32\x1d.OrchestrationState.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 \x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9e\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x12\x12\n\x05\x66orce\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\t\n\x07requestB\x08\n\x06_force\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"f\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\x12.\n\nisComplete\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 
\x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xfa\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\x12&\n\x0eoperationInfos\x18\x06 \x03(\x0b\x32\x0e.OperationInfo\"\x95\x01\n\rEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x13\n\x0b\x65xecutionId\x18\x02 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12(\n\x11operationRequests\x18\x04 \x03(\x0b\x32\r.HistoryEvent\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"W\n\rOperationInfo\x12\x11\n\trequestId\x18\x01 \x01(\t\x12\x33\n\x13responseDestination\x18\x02 \x01(\x0b\x32\x16.OrchestrationInstance\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"5\n\x1a\x41\x62\x61ndonActivityTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\x1d\n\x1b\x41\x62\x61ndonActivityTaskResponse\":\n\x1f\x41\x62\x61ndonOrchestrationTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\"\n AbandonOrchestrationTaskResponse\"3\n\x18\x41\x62\x61ndonEntityTaskRequest\x12\x17\n\x0f\x63ompletionToken\x18\x01 \x01(\t\"\x1b\n\x19\x41\x62\x61ndonEntityTaskResponse\"\xb9\x01\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\x12$\n\x1cmaxConcurrentEntityWorkItems\x18\x03 \x01(\x05\x12\'\n\x0c\x63\x61pabilities\x18\n 
\x03(\x0e\x32\x11.WorkerCapability\"\x8c\x02\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12)\n\x0f\x65ntityRequestV2\x18\x05 \x01(\x0b\x32\x0e.EntityRequestH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing\"\x84\x01\n\x1cStreamInstanceHistoryRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66orWorkItemProcessing\x18\x03 \x01(\x08\"-\n\x0cHistoryChunk\x12\x1d\n\x06\x65vents\x18\x01 \x03(\x0b\x32\r.HistoryEvent\"\x85\x02\n\x1dRerunWorkflowFromEventRequest\x12\x18\n\x10sourceInstanceID\x18\x01 \x01(\t\x12\x0f\n\x07\x65ventID\x18\x02 \x01(\r\x12\x1a\n\rnewInstanceID\x18\x03 \x01(\tH\x00\x88\x01\x01\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x16\n\x0eoverwriteInput\x18\x05 \x01(\x08\x12\'\n\x1anewChildWorkflowInstanceID\x18\x06 \x01(\tH\x01\x88\x01\x01\x42\x10\n\x0e_newInstanceIDB\x1d\n\x1b_newChildWorkflowInstanceID\"7\n\x1eRerunWorkflowFromEventResponse\x12\x15\n\rnewInstanceID\x18\x01 \x01(\t\"r\n\x16ListInstanceIDsRequest\x12\x1e\n\x11\x63ontinuationToken\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08pageSize\x18\x02 \x01(\rH\x01\x88\x01\x01\x42\x14\n\x12_continuationTokenB\x0b\n\t_pageSize\"d\n\x17ListInstanceIDsResponse\x12\x13\n\x0binstanceIds\x18\x01 \x03(\t\x12\x1e\n\x11\x63ontinuationToken\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x14\n\x12_continuationToken\"/\n\x19GetInstanceHistoryRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\";\n\x1aGetInstanceHistoryResponse\x12\x1d\n\x06\x65vents\x18\x01 \x03(\x0b\x32\r.HistoryEvent*>\n\rStalledReason\x12\x12\n\x0ePATCH_MISMATCH\x10\x00\x12\x19\n\x15VERSION_NOT_AVAILABLE\x10\x01*\xd7\x02\n\x13OrchestrationStatus\x12 \n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07\x12 
\n\x1cORCHESTRATION_STATUS_STALLED\x10\x08*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02*^\n\x10WorkerCapability\x12!\n\x1dWORKER_CAPABILITY_UNSPECIFIED\x10\x00\x12\'\n#WORKER_CAPABILITY_HISTORY_STREAMING\x10\x01\x32\xcb\x0f\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12G\n\x15StreamInstanceHistory\x12\x1d.StreamInstanceHistoryRequest\x1a\r.HistoryChunk0\x01\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponse\x12X\n\x1b\x41\x62\x61ndonTaskActivityWorkItem\x12\x1b.AbandonActivityTaskRequest\x1a\x1c.AbandonActivityTaskResponse\x12\x66\n\x1f\x41\x62\x61ndonTaskOrchestratorWorkItem\x12 .AbandonOrchestrationTaskRequest\x1a!.AbandonOrchestrationTaskResponse\x12R\n\x19\x41\x62\x61ndonTaskEntityWorkItem\x12\x19.AbandonEntityTaskRequest\x1a\x1a.AbandonEntityTaskResponse\x12Y\n\x16RerunWorkflowFromEvent\x12\x1e.RerunWorkflowFromEventRequest\x1a\x1f.RerunWorkflowFromEventResponse\x12\x44\n\x0fListInstanceIDs\x12\x17.ListInstanceIDsRequest\x1a\x18.ListInstanceIDsResponse\x12M\n\x12GetInstanceHistory\x12\x1a.GetInstanceHistoryRequest\x1a\x1b.GetInstanceHistoryResponseBV\n+io.dapr.durabletask.implementation.protobufZ\x0b/api/protos\xaa\x02\x19\x44\x61pr.DurableTask.Protobufb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'orchestrator_service_pb2', _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'durabletask.internal.orchestrator_service_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = 
b'\n1com.microsoft.durabletask.implementation.protobufZ\020/internal/protos\252\002\036Microsoft.DurableTask.Protobuf' + _globals['DESCRIPTOR']._serialized_options = b'\n+io.dapr.durabletask.implementation.protobufZ\013/api/protos\252\002\031Dapr.DurableTask.Protobuf' _globals['_TRACECONTEXT'].fields_by_name['spanID']._loaded_options = None _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' + _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._loaded_options = None + _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_options = b'8\001' _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._loaded_options = None _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' - _globals['_ORCHESTRATIONSTATUS']._serialized_start=12076 - _globals['_ORCHESTRATIONSTATUS']._serialized_end=12385 - _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12387 - _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12452 - _globals['_ORCHESTRATIONINSTANCE']._serialized_start=156 - _globals['_ORCHESTRATIONINSTANCE']._serialized_end=250 - _globals['_ACTIVITYREQUEST']._serialized_start=253 - _globals['_ACTIVITYREQUEST']._serialized_end=490 - _globals['_ACTIVITYRESPONSE']._serialized_start=493 - _globals['_ACTIVITYRESPONSE']._serialized_end=638 - _globals['_TASKFAILUREDETAILS']._serialized_start=641 - _globals['_TASKFAILUREDETAILS']._serialized_end=819 - _globals['_PARENTINSTANCEINFO']._serialized_start=822 - _globals['_PARENTINSTANCEINFO']._serialized_end=1013 - _globals['_TRACECONTEXT']._serialized_start=1015 - _globals['_TRACECONTEXT']._serialized_end=1120 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1123 - _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1515 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1518 - _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1685 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1687 - _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1775 - _globals['_TASKSCHEDULEDEVENT']._serialized_start=1778 - _globals['_TASKSCHEDULEDEVENT']._serialized_end=1947 - _globals['_TASKCOMPLETEDEVENT']._serialized_start=1949 - _globals['_TASKCOMPLETEDEVENT']._serialized_end=2040 - _globals['_TASKFAILEDEVENT']._serialized_start=2042 - _globals['_TASKFAILEDEVENT']._serialized_end=2129 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2132 - _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2339 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2341 - _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2452 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2454 - _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2561 - _globals['_TIMERCREATEDEVENT']._serialized_start=2563 - _globals['_TIMERCREATEDEVENT']._serialized_end=2626 - _globals['_TIMERFIREDEVENT']._serialized_start=2628 - _globals['_TIMERFIREDEVENT']._serialized_end=2706 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2708 - _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2734 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2736 - _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2764 - _globals['_EVENTSENTEVENT']._serialized_start=2766 - _globals['_EVENTSENTEVENT']._serialized_end=2861 - _globals['_EVENTRAISEDEVENT']._serialized_start=2863 - _globals['_EVENTRAISEDEVENT']._serialized_end=2940 - _globals['_GENERICEVENT']._serialized_start=2942 - _globals['_GENERICEVENT']._serialized_end=3000 - 
_globals['_HISTORYSTATEEVENT']._serialized_start=3002 - _globals['_HISTORYSTATEEVENT']._serialized_end=3070 - _globals['_CONTINUEASNEWEVENT']._serialized_start=3072 - _globals['_CONTINUEASNEWEVENT']._serialized_end=3137 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3139 - _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3209 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3211 - _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3279 - _globals['_HISTORYEVENT']._serialized_start=3282 - _globals['_HISTORYEVENT']._serialized_end=4440 - _globals['_SCHEDULETASKACTION']._serialized_start=4442 - _globals['_SCHEDULETASKACTION']._serialized_end=4568 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4571 - _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4727 - _globals['_CREATETIMERACTION']._serialized_start=4729 - _globals['_CREATETIMERACTION']._serialized_end=4792 - _globals['_SENDEVENTACTION']._serialized_start=4794 - _globals['_SENDEVENTACTION']._serialized_end=4911 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4914 - _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5222 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5224 - _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5337 - _globals['_ORCHESTRATORACTION']._serialized_start=5340 - _globals['_ORCHESTRATORACTION']._serialized_end=5718 - _globals['_ORCHESTRATORREQUEST']._serialized_start=5721 - _globals['_ORCHESTRATORREQUEST']._serialized_end=5939 - _globals['_ORCHESTRATORRESPONSE']._serialized_start=5942 - _globals['_ORCHESTRATORRESPONSE']._serialized_end=6074 - _globals['_CREATEINSTANCEREQUEST']._serialized_start=6077 - _globals['_CREATEINSTANCEREQUEST']._serialized_end=6496 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6453 - _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6496 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6498 - _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6617 - _globals['_CREATEINSTANCERESPONSE']._serialized_start=6619 - _globals['_CREATEINSTANCERESPONSE']._serialized_end=6663 - _globals['_GETINSTANCEREQUEST']._serialized_start=6665 - _globals['_GETINSTANCEREQUEST']._serialized_end=6734 - _globals['_GETINSTANCERESPONSE']._serialized_start=6736 - _globals['_GETINSTANCERESPONSE']._serialized_end=6822 - _globals['_REWINDINSTANCEREQUEST']._serialized_start=6824 - _globals['_REWINDINSTANCEREQUEST']._serialized_end=6913 - _globals['_REWINDINSTANCERESPONSE']._serialized_start=6915 - _globals['_REWINDINSTANCERESPONSE']._serialized_end=6939 - _globals['_ORCHESTRATIONSTATE']._serialized_start=6942 - _globals['_ORCHESTRATIONSTATE']._serialized_end=7618 - _globals['_RAISEEVENTREQUEST']._serialized_start=7620 - _globals['_RAISEEVENTREQUEST']._serialized_end=7718 - _globals['_RAISEEVENTRESPONSE']._serialized_start=7720 - _globals['_RAISEEVENTRESPONSE']._serialized_end=7740 - _globals['_TERMINATEREQUEST']._serialized_start=7742 - _globals['_TERMINATEREQUEST']._serialized_end=7845 - _globals['_TERMINATERESPONSE']._serialized_start=7847 - _globals['_TERMINATERESPONSE']._serialized_end=7866 - _globals['_SUSPENDREQUEST']._serialized_start=7868 - _globals['_SUSPENDREQUEST']._serialized_end=7950 - _globals['_SUSPENDRESPONSE']._serialized_start=7952 - _globals['_SUSPENDRESPONSE']._serialized_end=7969 - _globals['_RESUMEREQUEST']._serialized_start=7971 - _globals['_RESUMEREQUEST']._serialized_end=8052 - _globals['_RESUMERESPONSE']._serialized_start=8054 - 
_globals['_RESUMERESPONSE']._serialized_end=8070 - _globals['_QUERYINSTANCESREQUEST']._serialized_start=8072 - _globals['_QUERYINSTANCESREQUEST']._serialized_end=8126 - _globals['_INSTANCEQUERY']._serialized_start=8129 - _globals['_INSTANCEQUERY']._serialized_end=8515 - _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8518 - _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8648 - _globals['_PURGEINSTANCESREQUEST']._serialized_start=8651 - _globals['_PURGEINSTANCESREQUEST']._serialized_end=8779 - _globals['_PURGEINSTANCEFILTER']._serialized_start=8782 - _globals['_PURGEINSTANCEFILTER']._serialized_end=8952 - _globals['_PURGEINSTANCESRESPONSE']._serialized_start=8954 - _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9008 - _globals['_CREATETASKHUBREQUEST']._serialized_start=9010 - _globals['_CREATETASKHUBREQUEST']._serialized_end=9058 - _globals['_CREATETASKHUBRESPONSE']._serialized_start=9060 - _globals['_CREATETASKHUBRESPONSE']._serialized_end=9083 - _globals['_DELETETASKHUBREQUEST']._serialized_start=9085 - _globals['_DELETETASKHUBREQUEST']._serialized_end=9107 - _globals['_DELETETASKHUBRESPONSE']._serialized_start=9109 - _globals['_DELETETASKHUBRESPONSE']._serialized_end=9132 - _globals['_SIGNALENTITYREQUEST']._serialized_start=9135 - _globals['_SIGNALENTITYREQUEST']._serialized_end=9305 - _globals['_SIGNALENTITYRESPONSE']._serialized_start=9307 - _globals['_SIGNALENTITYRESPONSE']._serialized_end=9329 - _globals['_GETENTITYREQUEST']._serialized_start=9331 - _globals['_GETENTITYREQUEST']._serialized_end=9391 - _globals['_GETENTITYRESPONSE']._serialized_start=9393 - _globals['_GETENTITYRESPONSE']._serialized_end=9461 - _globals['_ENTITYQUERY']._serialized_start=9464 - _globals['_ENTITYQUERY']._serialized_end=9795 - _globals['_QUERYENTITIESREQUEST']._serialized_start=9797 - _globals['_QUERYENTITIESREQUEST']._serialized_end=9848 - _globals['_QUERYENTITIESRESPONSE']._serialized_start=9850 - _globals['_QUERYENTITIESRESPONSE']._serialized_end=9965 - _globals['_ENTITYMETADATA']._serialized_start=9968 - _globals['_ENTITYMETADATA']._serialized_end=10187 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10190 - _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10333 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10336 - _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10482 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10484 - _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10577 - _globals['_ENTITYBATCHREQUEST']._serialized_start=10580 - _globals['_ENTITYBATCHREQUEST']._serialized_end=10710 - _globals['_ENTITYBATCHRESULT']._serialized_start=10713 - _globals['_ENTITYBATCHRESULT']._serialized_end=10898 - _globals['_OPERATIONREQUEST']._serialized_start=10900 - _globals['_OPERATIONREQUEST']._serialized_end=11001 - _globals['_OPERATIONRESULT']._serialized_start=11003 - _globals['_OPERATIONRESULT']._serialized_end=11122 - _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11124 - _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11194 - _globals['_OPERATIONRESULTFAILURE']._serialized_start=11196 - _globals['_OPERATIONRESULTFAILURE']._serialized_end=11265 - _globals['_OPERATIONACTION']._serialized_start=11268 - _globals['_OPERATIONACTION']._serialized_end=11424 - _globals['_SENDSIGNALACTION']._serialized_start=11427 - _globals['_SENDSIGNALACTION']._serialized_end=11575 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11578 - _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11784 - 
_globals['_GETWORKITEMSREQUEST']._serialized_start=11786 - _globals['_GETWORKITEMSREQUEST']._serialized_end=11807 - _globals['_WORKITEM']._serialized_start=11810 - _globals['_WORKITEM']._serialized_end=12035 - _globals['_COMPLETETASKRESPONSE']._serialized_start=12037 - _globals['_COMPLETETASKRESPONSE']._serialized_end=12059 - _globals['_HEALTHPING']._serialized_start=12061 - _globals['_HEALTHPING']._serialized_end=12073 - _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12455 - _globals['_TASKHUBSIDECARSERVICE']._serialized_end=13859 + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._loaded_options = None + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_options = b'8\001' + _globals['_STALLEDREASON']._serialized_start=17324 + _globals['_STALLEDREASON']._serialized_end=17386 + _globals['_ORCHESTRATIONSTATUS']._serialized_start=17389 + _globals['_ORCHESTRATIONSTATUS']._serialized_end=17732 + _globals['_CREATEORCHESTRATIONACTION']._serialized_start=17734 + _globals['_CREATEORCHESTRATIONACTION']._serialized_end=17799 + _globals['_WORKERCAPABILITY']._serialized_start=17801 + _globals['_WORKERCAPABILITY']._serialized_end=17895 + _globals['_TASKROUTER']._serialized_start=177 + _globals['_TASKROUTER']._serialized_end=252 + _globals['_ORCHESTRATIONVERSION']._serialized_start=254 + _globals['_ORCHESTRATIONVERSION']._serialized_end=321 + _globals['_ORCHESTRATIONINSTANCE']._serialized_start=323 + _globals['_ORCHESTRATIONINSTANCE']._serialized_end=417 + _globals['_ACTIVITYREQUEST']._serialized_start=420 + _globals['_ACTIVITYREQUEST']._serialized_end=682 + _globals['_ACTIVITYRESPONSE']._serialized_start=685 + _globals['_ACTIVITYRESPONSE']._serialized_end=855 + _globals['_TASKFAILUREDETAILS']._serialized_start=858 + _globals['_TASKFAILUREDETAILS']._serialized_end=1036 + _globals['_PARENTINSTANCEINFO']._serialized_start=1039 + _globals['_PARENTINSTANCEINFO']._serialized_end=1260 + _globals['_RERUNPARENTINSTANCEINFO']._serialized_start=1262 + _globals['_RERUNPARENTINSTANCEINFO']._serialized_end=1307 + _globals['_TRACECONTEXT']._serialized_start=1309 + _globals['_TRACECONTEXT']._serialized_end=1414 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1417 + _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1902 + _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_start=1859 + _globals['_EXECUTIONSTARTEDEVENT_TAGSENTRY']._serialized_end=1902 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1905 + _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=2072 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=2074 + _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=2162 + _globals['_TASKSCHEDULEDEVENT']._serialized_start=2165 + _globals['_TASKSCHEDULEDEVENT']._serialized_end=2451 + _globals['_TASKCOMPLETEDEVENT']._serialized_start=2453 + _globals['_TASKCOMPLETEDEVENT']._serialized_end=2569 + _globals['_TASKFAILEDEVENT']._serialized_start=2571 + _globals['_TASKFAILEDEVENT']._serialized_end=2683 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2686 + _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2985 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2987 + _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=3098 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=3100 + _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=3207 + _globals['_TIMERCREATEDEVENT']._serialized_start=3210 + _globals['_TIMERCREATEDEVENT']._serialized_end=3393 + 
_globals['_TIMERFIREDEVENT']._serialized_start=3395 + _globals['_TIMERFIREDEVENT']._serialized_end=3473 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=3475 + _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=3558 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=3560 + _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=3588 + _globals['_EVENTSENTEVENT']._serialized_start=3590 + _globals['_EVENTSENTEVENT']._serialized_end=3685 + _globals['_EVENTRAISEDEVENT']._serialized_start=3687 + _globals['_EVENTRAISEDEVENT']._serialized_end=3764 + _globals['_GENERICEVENT']._serialized_start=3766 + _globals['_GENERICEVENT']._serialized_end=3824 + _globals['_HISTORYSTATEEVENT']._serialized_start=3826 + _globals['_HISTORYSTATEEVENT']._serialized_end=3894 + _globals['_CONTINUEASNEWEVENT']._serialized_start=3896 + _globals['_CONTINUEASNEWEVENT']._serialized_end=3961 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3963 + _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=4033 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=4035 + _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=4103 + _globals['_EXECUTIONSTALLEDEVENT']._serialized_start=4105 + _globals['_EXECUTIONSTALLEDEVENT']._serialized_end=4202 + _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_start=4205 + _globals['_ENTITYOPERATIONSIGNALEDEVENT']._serialized_end=4425 + _globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_start=4428 + _globals['_ENTITYOPERATIONCALLEDEVENT']._serialized_end=4759 + _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_start=4762 + _globals['_ENTITYLOCKREQUESTEDEVENT']._serialized_end=4906 + _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_start=4908 + _globals['_ENTITYOPERATIONCOMPLETEDEVENT']._serialized_end=5004 + _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_start=5006 + _globals['_ENTITYOPERATIONFAILEDEVENT']._serialized_end=5098 + _globals['_ENTITYUNLOCKSENTEVENT']._serialized_start=5101 + _globals['_ENTITYUNLOCKSENTEVENT']._serialized_end=5263 + _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_start=5265 + _globals['_ENTITYLOCKGRANTEDEVENT']._serialized_end=5316 + _globals['_HISTORYEVENT']._serialized_start=5319 + _globals['_HISTORYEVENT']._serialized_end=6996 + _globals['_SCHEDULETASKACTION']._serialized_start=6999 + _globals['_SCHEDULETASKACTION']._serialized_end=7195 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=7198 + _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=7399 + _globals['_CREATETIMERACTION']._serialized_start=7401 + _globals['_CREATETIMERACTION']._serialized_end=7492 + _globals['_SENDEVENTACTION']._serialized_start=7494 + _globals['_SENDEVENTACTION']._serialized_end=7611 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=7614 + _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=7922 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=7924 + _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=8037 + _globals['_SENDENTITYMESSAGEACTION']._serialized_start=8040 + _globals['_SENDENTITYMESSAGEACTION']._serialized_end=8324 + _globals['_ORCHESTRATORVERSIONNOTAVAILABLEACTION']._serialized_start=8326 + _globals['_ORCHESTRATORVERSIONNOTAVAILABLEACTION']._serialized_end=8365 + _globals['_ORCHESTRATORACTION']._serialized_start=8368 + _globals['_ORCHESTRATORACTION']._serialized_end=8929 + _globals['_ORCHESTRATORREQUEST']._serialized_start=8932 + _globals['_ORCHESTRATORREQUEST']._serialized_end=9229 + _globals['_ORCHESTRATORRESPONSE']._serialized_start=9232 + 
_globals['_ORCHESTRATORRESPONSE']._serialized_end=9503 + _globals['_CREATEINSTANCEREQUEST']._serialized_start=9506 + _globals['_CREATEINSTANCEREQUEST']._serialized_end=9968 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=1859 + _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=1902 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=9970 + _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=10089 + _globals['_CREATEINSTANCERESPONSE']._serialized_start=10091 + _globals['_CREATEINSTANCERESPONSE']._serialized_end=10135 + _globals['_GETINSTANCEREQUEST']._serialized_start=10137 + _globals['_GETINSTANCEREQUEST']._serialized_end=10206 + _globals['_GETINSTANCERESPONSE']._serialized_start=10208 + _globals['_GETINSTANCERESPONSE']._serialized_end=10294 + _globals['_REWINDINSTANCEREQUEST']._serialized_start=10296 + _globals['_REWINDINSTANCEREQUEST']._serialized_end=10385 + _globals['_REWINDINSTANCERESPONSE']._serialized_start=10387 + _globals['_REWINDINSTANCERESPONSE']._serialized_end=10411 + _globals['_ORCHESTRATIONSTATE']._serialized_start=10414 + _globals['_ORCHESTRATIONSTATE']._serialized_end=11180 + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_start=1859 + _globals['_ORCHESTRATIONSTATE_TAGSENTRY']._serialized_end=1902 + _globals['_RAISEEVENTREQUEST']._serialized_start=11182 + _globals['_RAISEEVENTREQUEST']._serialized_end=11280 + _globals['_RAISEEVENTRESPONSE']._serialized_start=11282 + _globals['_RAISEEVENTRESPONSE']._serialized_end=11302 + _globals['_TERMINATEREQUEST']._serialized_start=11304 + _globals['_TERMINATEREQUEST']._serialized_end=11407 + _globals['_TERMINATERESPONSE']._serialized_start=11409 + _globals['_TERMINATERESPONSE']._serialized_end=11428 + _globals['_SUSPENDREQUEST']._serialized_start=11430 + _globals['_SUSPENDREQUEST']._serialized_end=11512 + _globals['_SUSPENDRESPONSE']._serialized_start=11514 + _globals['_SUSPENDRESPONSE']._serialized_end=11531 + _globals['_RESUMEREQUEST']._serialized_start=11533 + _globals['_RESUMEREQUEST']._serialized_end=11614 + _globals['_RESUMERESPONSE']._serialized_start=11616 + _globals['_RESUMERESPONSE']._serialized_end=11632 + _globals['_QUERYINSTANCESREQUEST']._serialized_start=11634 + _globals['_QUERYINSTANCESREQUEST']._serialized_end=11688 + _globals['_INSTANCEQUERY']._serialized_start=11691 + _globals['_INSTANCEQUERY']._serialized_end=12077 + _globals['_QUERYINSTANCESRESPONSE']._serialized_start=12080 + _globals['_QUERYINSTANCESRESPONSE']._serialized_end=12210 + _globals['_PURGEINSTANCESREQUEST']._serialized_start=12213 + _globals['_PURGEINSTANCESREQUEST']._serialized_end=12371 + _globals['_PURGEINSTANCEFILTER']._serialized_start=12374 + _globals['_PURGEINSTANCEFILTER']._serialized_end=12544 + _globals['_PURGEINSTANCESRESPONSE']._serialized_start=12546 + _globals['_PURGEINSTANCESRESPONSE']._serialized_end=12648 + _globals['_CREATETASKHUBREQUEST']._serialized_start=12650 + _globals['_CREATETASKHUBREQUEST']._serialized_end=12698 + _globals['_CREATETASKHUBRESPONSE']._serialized_start=12700 + _globals['_CREATETASKHUBRESPONSE']._serialized_end=12723 + _globals['_DELETETASKHUBREQUEST']._serialized_start=12725 + _globals['_DELETETASKHUBREQUEST']._serialized_end=12747 + _globals['_DELETETASKHUBRESPONSE']._serialized_start=12749 + _globals['_DELETETASKHUBRESPONSE']._serialized_end=12772 + _globals['_SIGNALENTITYREQUEST']._serialized_start=12775 + _globals['_SIGNALENTITYREQUEST']._serialized_end=12945 + _globals['_SIGNALENTITYRESPONSE']._serialized_start=12947 + 
_globals['_SIGNALENTITYRESPONSE']._serialized_end=12969 + _globals['_GETENTITYREQUEST']._serialized_start=12971 + _globals['_GETENTITYREQUEST']._serialized_end=13031 + _globals['_GETENTITYRESPONSE']._serialized_start=13033 + _globals['_GETENTITYRESPONSE']._serialized_end=13101 + _globals['_ENTITYQUERY']._serialized_start=13104 + _globals['_ENTITYQUERY']._serialized_end=13435 + _globals['_QUERYENTITIESREQUEST']._serialized_start=13437 + _globals['_QUERYENTITIESREQUEST']._serialized_end=13488 + _globals['_QUERYENTITIESRESPONSE']._serialized_start=13490 + _globals['_QUERYENTITIESRESPONSE']._serialized_end=13605 + _globals['_ENTITYMETADATA']._serialized_start=13608 + _globals['_ENTITYMETADATA']._serialized_end=13827 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=13830 + _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=13973 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=13976 + _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=14122 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=14124 + _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=14217 + _globals['_ENTITYBATCHREQUEST']._serialized_start=14220 + _globals['_ENTITYBATCHREQUEST']._serialized_end=14350 + _globals['_ENTITYBATCHRESULT']._serialized_start=14353 + _globals['_ENTITYBATCHRESULT']._serialized_end=14603 + _globals['_ENTITYREQUEST']._serialized_start=14606 + _globals['_ENTITYREQUEST']._serialized_end=14755 + _globals['_OPERATIONREQUEST']._serialized_start=14757 + _globals['_OPERATIONREQUEST']._serialized_end=14858 + _globals['_OPERATIONRESULT']._serialized_start=14860 + _globals['_OPERATIONRESULT']._serialized_end=14979 + _globals['_OPERATIONINFO']._serialized_start=14981 + _globals['_OPERATIONINFO']._serialized_end=15068 + _globals['_OPERATIONRESULTSUCCESS']._serialized_start=15070 + _globals['_OPERATIONRESULTSUCCESS']._serialized_end=15140 + _globals['_OPERATIONRESULTFAILURE']._serialized_start=15142 + _globals['_OPERATIONRESULTFAILURE']._serialized_end=15211 + _globals['_OPERATIONACTION']._serialized_start=15214 + _globals['_OPERATIONACTION']._serialized_end=15370 + _globals['_SENDSIGNALACTION']._serialized_start=15373 + _globals['_SENDSIGNALACTION']._serialized_end=15521 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=15524 + _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=15730 + _globals['_ABANDONACTIVITYTASKREQUEST']._serialized_start=15732 + _globals['_ABANDONACTIVITYTASKREQUEST']._serialized_end=15785 + _globals['_ABANDONACTIVITYTASKRESPONSE']._serialized_start=15787 + _globals['_ABANDONACTIVITYTASKRESPONSE']._serialized_end=15816 + _globals['_ABANDONORCHESTRATIONTASKREQUEST']._serialized_start=15818 + _globals['_ABANDONORCHESTRATIONTASKREQUEST']._serialized_end=15876 + _globals['_ABANDONORCHESTRATIONTASKRESPONSE']._serialized_start=15878 + _globals['_ABANDONORCHESTRATIONTASKRESPONSE']._serialized_end=15912 + _globals['_ABANDONENTITYTASKREQUEST']._serialized_start=15914 + _globals['_ABANDONENTITYTASKREQUEST']._serialized_end=15965 + _globals['_ABANDONENTITYTASKRESPONSE']._serialized_start=15967 + _globals['_ABANDONENTITYTASKRESPONSE']._serialized_end=15994 + _globals['_GETWORKITEMSREQUEST']._serialized_start=15997 + _globals['_GETWORKITEMSREQUEST']._serialized_end=16182 + _globals['_WORKITEM']._serialized_start=16185 + _globals['_WORKITEM']._serialized_end=16453 + _globals['_COMPLETETASKRESPONSE']._serialized_start=16455 + _globals['_COMPLETETASKRESPONSE']._serialized_end=16477 + _globals['_HEALTHPING']._serialized_start=16479 + 
_globals['_HEALTHPING']._serialized_end=16491 + _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_start=16494 + _globals['_STREAMINSTANCEHISTORYREQUEST']._serialized_end=16626 + _globals['_HISTORYCHUNK']._serialized_start=16628 + _globals['_HISTORYCHUNK']._serialized_end=16673 + _globals['_RERUNWORKFLOWFROMEVENTREQUEST']._serialized_start=16676 + _globals['_RERUNWORKFLOWFROMEVENTREQUEST']._serialized_end=16937 + _globals['_RERUNWORKFLOWFROMEVENTRESPONSE']._serialized_start=16939 + _globals['_RERUNWORKFLOWFROMEVENTRESPONSE']._serialized_end=16994 + _globals['_LISTINSTANCEIDSREQUEST']._serialized_start=16996 + _globals['_LISTINSTANCEIDSREQUEST']._serialized_end=17110 + _globals['_LISTINSTANCEIDSRESPONSE']._serialized_start=17112 + _globals['_LISTINSTANCEIDSRESPONSE']._serialized_end=17212 + _globals['_GETINSTANCEHISTORYREQUEST']._serialized_start=17214 + _globals['_GETINSTANCEHISTORYREQUEST']._serialized_end=17261 + _globals['_GETINSTANCEHISTORYRESPONSE']._serialized_start=17263 + _globals['_GETINSTANCEHISTORYRESPONSE']._serialized_end=17322 + _globals['_TASKHUBSIDECARSERVICE']._serialized_start=17898 + _globals['_TASKHUBSIDECARSERVICE']._serialized_end=19893 # @@protoc_insertion_point(module_scope) diff --git a/durabletask/internal/orchestrator_service_pb2.pyi b/durabletask/internal/orchestrator_service_pb2.pyi index 82d2e1ac..53a3e480 100644 --- a/durabletask/internal/orchestrator_service_pb2.pyi +++ b/durabletask/internal/orchestrator_service_pb2.pyi @@ -1,3 +1,5 @@ +import datetime + from google.protobuf import timestamp_pb2 as _timestamp_pb2 from google.protobuf import duration_pb2 as _duration_pb2 from google.protobuf import wrappers_pb2 as _wrappers_pb2 @@ -6,10 +8,16 @@ from google.protobuf.internal import containers as _containers from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor +class StalledReason(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + PATCH_MISMATCH: _ClassVar[StalledReason] + VERSION_NOT_AVAILABLE: _ClassVar[StalledReason] + class OrchestrationStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () ORCHESTRATION_STATUS_RUNNING: _ClassVar[OrchestrationStatus] @@ -20,12 +28,20 @@ class OrchestrationStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): ORCHESTRATION_STATUS_TERMINATED: _ClassVar[OrchestrationStatus] ORCHESTRATION_STATUS_PENDING: _ClassVar[OrchestrationStatus] ORCHESTRATION_STATUS_SUSPENDED: _ClassVar[OrchestrationStatus] + ORCHESTRATION_STATUS_STALLED: _ClassVar[OrchestrationStatus] class CreateOrchestrationAction(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () ERROR: _ClassVar[CreateOrchestrationAction] IGNORE: _ClassVar[CreateOrchestrationAction] TERMINATE: _ClassVar[CreateOrchestrationAction] + +class WorkerCapability(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + WORKER_CAPABILITY_UNSPECIFIED: _ClassVar[WorkerCapability] + WORKER_CAPABILITY_HISTORY_STREAMING: _ClassVar[WorkerCapability] +PATCH_MISMATCH: StalledReason +VERSION_NOT_AVAILABLE: StalledReason ORCHESTRATION_STATUS_RUNNING: OrchestrationStatus 
ORCHESTRATION_STATUS_COMPLETED: OrchestrationStatus ORCHESTRATION_STATUS_CONTINUED_AS_NEW: OrchestrationStatus @@ -34,9 +50,28 @@ ORCHESTRATION_STATUS_CANCELED: OrchestrationStatus ORCHESTRATION_STATUS_TERMINATED: OrchestrationStatus ORCHESTRATION_STATUS_PENDING: OrchestrationStatus ORCHESTRATION_STATUS_SUSPENDED: OrchestrationStatus +ORCHESTRATION_STATUS_STALLED: OrchestrationStatus ERROR: CreateOrchestrationAction IGNORE: CreateOrchestrationAction TERMINATE: CreateOrchestrationAction +WORKER_CAPABILITY_UNSPECIFIED: WorkerCapability +WORKER_CAPABILITY_HISTORY_STREAMING: WorkerCapability + +class TaskRouter(_message.Message): + __slots__ = ("sourceAppID", "targetAppID") + SOURCEAPPID_FIELD_NUMBER: _ClassVar[int] + TARGETAPPID_FIELD_NUMBER: _ClassVar[int] + sourceAppID: str + targetAppID: str + def __init__(self, sourceAppID: _Optional[str] = ..., targetAppID: _Optional[str] = ...) -> None: ... + +class OrchestrationVersion(_message.Message): + __slots__ = ("patches", "name") + PATCHES_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + patches: _containers.RepeatedScalarFieldContainer[str] + name: str + def __init__(self, patches: _Optional[_Iterable[str]] = ..., name: _Optional[str] = ...) -> None: ... class OrchestrationInstance(_message.Message): __slots__ = ("instanceId", "executionId") @@ -47,32 +82,36 @@ class OrchestrationInstance(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... class ActivityRequest(_message.Message): - __slots__ = ("name", "version", "input", "orchestrationInstance", "taskId", "parentTraceContext") + __slots__ = ("name", "version", "input", "orchestrationInstance", "taskId", "parentTraceContext", "taskExecutionId") NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] ORCHESTRATIONINSTANCE_FIELD_NUMBER: _ClassVar[int] TASKID_FIELD_NUMBER: _ClassVar[int] PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue orchestrationInstance: OrchestrationInstance taskId: int parentTraceContext: TraceContext - def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., taskId: _Optional[int] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... + taskExecutionId: str + def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., taskId: _Optional[int] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... 
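Note on the enum additions just above: the regenerated stub introduces a StalledReason enum, a WorkerCapability enum, and a new ORCHESTRATION_STATUS_STALLED member on OrchestrationStatus. As with all protoc output, each value is reachable either through its EnumTypeWrapper or as a module-level constant; a quick sanity-check sketch against the regenerated module:

    from durabletask.internal import orchestrator_service_pb2 as pb

    # The wrapper class and the module-level constant name the same integer.
    assert pb.ORCHESTRATION_STATUS_STALLED == pb.OrchestrationStatus.Value("ORCHESTRATION_STATUS_STALLED")
    assert pb.OrchestrationStatus.Name(pb.ORCHESTRATION_STATUS_STALLED) == "ORCHESTRATION_STATUS_STALLED"
    assert pb.StalledReason.Name(pb.VERSION_NOT_AVAILABLE) == "VERSION_NOT_AVAILABLE"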
class ActivityResponse(_message.Message): - __slots__ = ("instanceId", "taskId", "result", "failureDetails") + __slots__ = ("instanceId", "taskId", "result", "failureDetails", "completionToken") INSTANCEID_FIELD_NUMBER: _ClassVar[int] TASKID_FIELD_NUMBER: _ClassVar[int] RESULT_FIELD_NUMBER: _ClassVar[int] FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] instanceId: str taskId: int result: _wrappers_pb2.StringValue failureDetails: TaskFailureDetails - def __init__(self, instanceId: _Optional[str] = ..., taskId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... + completionToken: str + def __init__(self, instanceId: _Optional[str] = ..., taskId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... class TaskFailureDetails(_message.Message): __slots__ = ("errorType", "errorMessage", "stackTrace", "innerFailure", "isNonRetriable") @@ -89,16 +128,24 @@ class TaskFailureDetails(_message.Message): def __init__(self, errorType: _Optional[str] = ..., errorMessage: _Optional[str] = ..., stackTrace: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., innerFailure: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., isNonRetriable: bool = ...) -> None: ... class ParentInstanceInfo(_message.Message): - __slots__ = ("taskScheduledId", "name", "version", "orchestrationInstance") + __slots__ = ("taskScheduledId", "name", "version", "orchestrationInstance", "appID") TASKSCHEDULEDID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] ORCHESTRATIONINSTANCE_FIELD_NUMBER: _ClassVar[int] + APPID_FIELD_NUMBER: _ClassVar[int] taskScheduledId: int name: _wrappers_pb2.StringValue version: _wrappers_pb2.StringValue orchestrationInstance: OrchestrationInstance - def __init__(self, taskScheduledId: _Optional[int] = ..., name: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ...) -> None: ... + appID: str + def __init__(self, taskScheduledId: _Optional[int] = ..., name: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., appID: _Optional[str] = ...) -> None: ... + +class RerunParentInstanceInfo(_message.Message): + __slots__ = ("instanceID",) + INSTANCEID_FIELD_NUMBER: _ClassVar[int] + instanceID: str + def __init__(self, instanceID: _Optional[str] = ...) -> None: ... class TraceContext(_message.Message): __slots__ = ("traceParent", "spanID", "traceState") @@ -111,7 +158,14 @@ class TraceContext(_message.Message): def __init__(self, traceParent: _Optional[str] = ..., spanID: _Optional[str] = ..., traceState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
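ActivityRequest gains a taskExecutionId and ActivityResponse a completionToken, mirroring the completionToken added to WorkItem in the serialized descriptor above. A minimal worker-side sketch under that reading (the correlation semantics are inferred from the field names, not stated anywhere in this diff):

    from google.protobuf import wrappers_pb2
    from durabletask.internal import orchestrator_service_pb2 as pb

    def complete_activity(work_item: pb.WorkItem, result_json: str) -> pb.ActivityResponse:
        # Echo instanceId/taskId from the request, and hand the work item's
        # completionToken back so the sidecar can presumably match this
        # completion to the outstanding work item.
        act = work_item.activityRequest
        return pb.ActivityResponse(
            instanceId=act.orchestrationInstance.instanceId,
            taskId=act.taskId,
            result=wrappers_pb2.StringValue(value=result_json),
            completionToken=work_item.completionToken,
        )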
class ExecutionStartedEvent(_message.Message): - __slots__ = ("name", "version", "input", "orchestrationInstance", "parentInstance", "scheduledStartTimestamp", "parentTraceContext", "orchestrationSpanID") + __slots__ = ("name", "version", "input", "orchestrationInstance", "parentInstance", "scheduledStartTimestamp", "parentTraceContext", "orchestrationSpanID", "tags") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] @@ -120,6 +174,7 @@ class ExecutionStartedEvent(_message.Message): SCHEDULEDSTARTTIMESTAMP_FIELD_NUMBER: _ClassVar[int] PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] ORCHESTRATIONSPANID_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue @@ -128,7 +183,8 @@ class ExecutionStartedEvent(_message.Message): scheduledStartTimestamp: _timestamp_pb2.Timestamp parentTraceContext: TraceContext orchestrationSpanID: _wrappers_pb2.StringValue - def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., parentInstance: _Optional[_Union[ParentInstanceInfo, _Mapping]] = ..., scheduledStartTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., orchestrationSpanID: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + tags: _containers.ScalarMap[str, str] + def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationInstance: _Optional[_Union[OrchestrationInstance, _Mapping]] = ..., parentInstance: _Optional[_Union[ParentInstanceInfo, _Mapping]] = ..., scheduledStartTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., orchestrationSpanID: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., tags: _Optional[_Mapping[str, str]] = ...) -> None: ... class ExecutionCompletedEvent(_message.Message): __slots__ = ("orchestrationStatus", "result", "failureDetails") @@ -149,46 +205,56 @@ class ExecutionTerminatedEvent(_message.Message): def __init__(self, input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., recurse: bool = ...) -> None: ... 
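ExecutionStartedEvent picks up a tags map (a map<string, string>, per the TagsEntry options set earlier in the generated module), and the stubs now union datetime.datetime into every Timestamp-typed constructor parameter. A short sketch; the tag keys and values are illustrative, and the explicit Timestamp conversion keeps it portable across protobuf runtimes that predate direct datetime support:

    import datetime
    from google.protobuf import timestamp_pb2
    from durabletask.internal import orchestrator_service_pb2 as pb

    evt = pb.ExecutionStartedEvent(name="MyOrchestration")
    evt.tags["tenant"] = "contoso"    # map fields support item assignment...
    evt.tags.update({"env": "dev"})   # ...and dict-style updates
    assert dict(evt.tags) == {"tenant": "contoso", "env": "dev"}

    ts = timestamp_pb2.Timestamp()
    ts.FromDatetime(datetime.datetime.now(datetime.timezone.utc))
    evt.scheduledStartTimestamp.CopyFrom(ts)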
class TaskScheduledEvent(_message.Message): - __slots__ = ("name", "version", "input", "parentTraceContext") + __slots__ = ("name", "version", "input", "parentTraceContext", "taskExecutionId", "rerunParentInstanceInfo") NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] + RERUNPARENTINSTANCEINFO_FIELD_NUMBER: _ClassVar[int] name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue parentTraceContext: TraceContext - def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... + taskExecutionId: str + rerunParentInstanceInfo: RerunParentInstanceInfo + def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., taskExecutionId: _Optional[str] = ..., rerunParentInstanceInfo: _Optional[_Union[RerunParentInstanceInfo, _Mapping]] = ...) -> None: ... class TaskCompletedEvent(_message.Message): - __slots__ = ("taskScheduledId", "result") + __slots__ = ("taskScheduledId", "result", "taskExecutionId") TASKSCHEDULEDID_FIELD_NUMBER: _ClassVar[int] RESULT_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] taskScheduledId: int result: _wrappers_pb2.StringValue - def __init__(self, taskScheduledId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + taskExecutionId: str + def __init__(self, taskScheduledId: _Optional[int] = ..., result: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... class TaskFailedEvent(_message.Message): - __slots__ = ("taskScheduledId", "failureDetails") + __slots__ = ("taskScheduledId", "failureDetails", "taskExecutionId") TASKSCHEDULEDID_FIELD_NUMBER: _ClassVar[int] FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] taskScheduledId: int failureDetails: TaskFailureDetails - def __init__(self, taskScheduledId: _Optional[int] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... + taskExecutionId: str + def __init__(self, taskScheduledId: _Optional[int] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... 
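TaskScheduledEvent, TaskCompletedEvent, and TaskFailedEvent all gain the same taskExecutionId string. The correlation key between a schedule and its outcome remains taskScheduledId; a plausible reading is that taskExecutionId disambiguates re-executions of the same task, though this diff does not say so. A replay-side lookup sketch using only the confirmed fields:

    from durabletask.internal import orchestrator_service_pb2 as pb

    def find_task_completion(history: list[pb.HistoryEvent], scheduled_id: int) -> pb.TaskCompletedEvent | None:
        # Walk the history and match the completion event back to the
        # TaskScheduledEvent whose event ID scheduled it.
        for evt in history:
            if evt.HasField("taskCompleted") and evt.taskCompleted.taskScheduledId == scheduled_id:
                return evt.taskCompleted
        return None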
class SubOrchestrationInstanceCreatedEvent(_message.Message): - __slots__ = ("instanceId", "name", "version", "input", "parentTraceContext") + __slots__ = ("instanceId", "name", "version", "input", "parentTraceContext", "rerunParentInstanceInfo") INSTANCEID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] + RERUNPARENTINSTANCEINFO_FIELD_NUMBER: _ClassVar[int] instanceId: str name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue parentTraceContext: TraceContext - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... + rerunParentInstanceInfo: RerunParentInstanceInfo + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ..., rerunParentInstanceInfo: _Optional[_Union[RerunParentInstanceInfo, _Mapping]] = ...) -> None: ... class SubOrchestrationInstanceCompletedEvent(_message.Message): __slots__ = ("taskScheduledId", "result") @@ -207,10 +273,14 @@ class SubOrchestrationInstanceFailedEvent(_message.Message): def __init__(self, taskScheduledId: _Optional[int] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... class TimerCreatedEvent(_message.Message): - __slots__ = ("fireAt",) + __slots__ = ("fireAt", "name", "rerunParentInstanceInfo") FIREAT_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + RERUNPARENTINSTANCEINFO_FIELD_NUMBER: _ClassVar[int] fireAt: _timestamp_pb2.Timestamp - def __init__(self, fireAt: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + name: str + rerunParentInstanceInfo: RerunParentInstanceInfo + def __init__(self, fireAt: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., name: _Optional[str] = ..., rerunParentInstanceInfo: _Optional[_Union[RerunParentInstanceInfo, _Mapping]] = ...) -> None: ... class TimerFiredEvent(_message.Message): __slots__ = ("fireAt", "timerId") @@ -218,11 +288,13 @@ class TimerFiredEvent(_message.Message): TIMERID_FIELD_NUMBER: _ClassVar[int] fireAt: _timestamp_pb2.Timestamp timerId: int - def __init__(self, fireAt: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., timerId: _Optional[int] = ...) -> None: ... + def __init__(self, fireAt: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., timerId: _Optional[int] = ...) -> None: ... class OrchestratorStartedEvent(_message.Message): - __slots__ = () - def __init__(self) -> None: ... + __slots__ = ("version",) + VERSION_FIELD_NUMBER: _ClassVar[int] + version: OrchestrationVersion + def __init__(self, version: _Optional[_Union[OrchestrationVersion, _Mapping]] = ...) -> None: ... class OrchestratorCompletedEvent(_message.Message): __slots__ = () @@ -276,8 +348,92 @@ class ExecutionResumedEvent(_message.Message): input: _wrappers_pb2.StringValue def __init__(self, input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
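OrchestratorStartedEvent changes from an empty message to one carrying an OrchestrationVersion (the name-plus-patches message introduced near the top of this stub), which lines up with the new PATCH_MISMATCH / VERSION_NOT_AVAILABLE stall reasons. A construction sketch with made-up version values:

    from durabletask.internal import orchestrator_service_pb2 as pb

    started = pb.OrchestratorStartedEvent(
        version=pb.OrchestrationVersion(name="v2", patches=["patch-a", "patch-b"]),
    )
    assert started.version.name == "v2"
    assert list(started.version.patches) == ["patch-a", "patch-b"]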
+class ExecutionStalledEvent(_message.Message): + __slots__ = ("reason", "description") + REASON_FIELD_NUMBER: _ClassVar[int] + DESCRIPTION_FIELD_NUMBER: _ClassVar[int] + reason: StalledReason + description: str + def __init__(self, reason: _Optional[_Union[StalledReason, str]] = ..., description: _Optional[str] = ...) -> None: ... + +class EntityOperationSignaledEvent(_message.Message): + __slots__ = ("requestId", "operation", "scheduledTime", "input", "targetInstanceId") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + OPERATION_FIELD_NUMBER: _ClassVar[int] + SCHEDULEDTIME_FIELD_NUMBER: _ClassVar[int] + INPUT_FIELD_NUMBER: _ClassVar[int] + TARGETINSTANCEID_FIELD_NUMBER: _ClassVar[int] + requestId: str + operation: str + scheduledTime: _timestamp_pb2.Timestamp + input: _wrappers_pb2.StringValue + targetInstanceId: _wrappers_pb2.StringValue + def __init__(self, requestId: _Optional[str] = ..., operation: _Optional[str] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + +class EntityOperationCalledEvent(_message.Message): + __slots__ = ("requestId", "operation", "scheduledTime", "input", "parentInstanceId", "parentExecutionId", "targetInstanceId") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + OPERATION_FIELD_NUMBER: _ClassVar[int] + SCHEDULEDTIME_FIELD_NUMBER: _ClassVar[int] + INPUT_FIELD_NUMBER: _ClassVar[int] + PARENTINSTANCEID_FIELD_NUMBER: _ClassVar[int] + PARENTEXECUTIONID_FIELD_NUMBER: _ClassVar[int] + TARGETINSTANCEID_FIELD_NUMBER: _ClassVar[int] + requestId: str + operation: str + scheduledTime: _timestamp_pb2.Timestamp + input: _wrappers_pb2.StringValue + parentInstanceId: _wrappers_pb2.StringValue + parentExecutionId: _wrappers_pb2.StringValue + targetInstanceId: _wrappers_pb2.StringValue + def __init__(self, requestId: _Optional[str] = ..., operation: _Optional[str] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., parentExecutionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + +class EntityLockRequestedEvent(_message.Message): + __slots__ = ("criticalSectionId", "lockSet", "position", "parentInstanceId") + CRITICALSECTIONID_FIELD_NUMBER: _ClassVar[int] + LOCKSET_FIELD_NUMBER: _ClassVar[int] + POSITION_FIELD_NUMBER: _ClassVar[int] + PARENTINSTANCEID_FIELD_NUMBER: _ClassVar[int] + criticalSectionId: str + lockSet: _containers.RepeatedScalarFieldContainer[str] + position: int + parentInstanceId: _wrappers_pb2.StringValue + def __init__(self, criticalSectionId: _Optional[str] = ..., lockSet: _Optional[_Iterable[str]] = ..., position: _Optional[int] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + +class EntityOperationCompletedEvent(_message.Message): + __slots__ = ("requestId", "output") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + OUTPUT_FIELD_NUMBER: _ClassVar[int] + requestId: str + output: _wrappers_pb2.StringValue + def __init__(self, requestId: _Optional[str] = ..., output: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
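The entity history events (the signaled/called/lock-requested/completed messages here, plus the failed/unlock/lock-granted messages just below) bring durable-entities support into these stubs. A minimal sketch of the signal event; the request ID and operation name are placeholders, and the @name@key instance-ID format is an assumption carried over from other durabletask SDKs, not something this diff establishes:

    from google.protobuf import timestamp_pb2, wrappers_pb2
    from durabletask.internal import orchestrator_service_pb2 as pb

    ts = timestamp_pb2.Timestamp()
    ts.GetCurrentTime()
    signal = pb.EntityOperationSignaledEvent(
        requestId="req-1",                          # illustrative request ID
        operation="add",                            # illustrative operation name
        scheduledTime=ts,
        input=wrappers_pb2.StringValue(value="5"),
        targetInstanceId=wrappers_pb2.StringValue(value="@counter@c1"),  # assumed entity-ID format
    )
    assert signal.operation == "add"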
+ +class EntityOperationFailedEvent(_message.Message): + __slots__ = ("requestId", "failureDetails") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + requestId: str + failureDetails: TaskFailureDetails + def __init__(self, requestId: _Optional[str] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... + +class EntityUnlockSentEvent(_message.Message): + __slots__ = ("criticalSectionId", "parentInstanceId", "targetInstanceId") + CRITICALSECTIONID_FIELD_NUMBER: _ClassVar[int] + PARENTINSTANCEID_FIELD_NUMBER: _ClassVar[int] + TARGETINSTANCEID_FIELD_NUMBER: _ClassVar[int] + criticalSectionId: str + parentInstanceId: _wrappers_pb2.StringValue + targetInstanceId: _wrappers_pb2.StringValue + def __init__(self, criticalSectionId: _Optional[str] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., targetInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + +class EntityLockGrantedEvent(_message.Message): + __slots__ = ("criticalSectionId",) + CRITICALSECTIONID_FIELD_NUMBER: _ClassVar[int] + criticalSectionId: str + def __init__(self, criticalSectionId: _Optional[str] = ...) -> None: ... + class HistoryEvent(_message.Message): - __slots__ = ("eventId", "timestamp", "executionStarted", "executionCompleted", "executionTerminated", "taskScheduled", "taskCompleted", "taskFailed", "subOrchestrationInstanceCreated", "subOrchestrationInstanceCompleted", "subOrchestrationInstanceFailed", "timerCreated", "timerFired", "orchestratorStarted", "orchestratorCompleted", "eventSent", "eventRaised", "genericEvent", "historyState", "continueAsNew", "executionSuspended", "executionResumed") + __slots__ = ("eventId", "timestamp", "executionStarted", "executionCompleted", "executionTerminated", "taskScheduled", "taskCompleted", "taskFailed", "subOrchestrationInstanceCreated", "subOrchestrationInstanceCompleted", "subOrchestrationInstanceFailed", "timerCreated", "timerFired", "orchestratorStarted", "orchestratorCompleted", "eventSent", "eventRaised", "genericEvent", "historyState", "continueAsNew", "executionSuspended", "executionResumed", "entityOperationSignaled", "entityOperationCalled", "entityOperationCompleted", "entityOperationFailed", "entityLockRequested", "entityLockGranted", "entityUnlockSent", "executionStalled", "router") EVENTID_FIELD_NUMBER: _ClassVar[int] TIMESTAMP_FIELD_NUMBER: _ClassVar[int] EXECUTIONSTARTED_FIELD_NUMBER: _ClassVar[int] @@ -300,6 +456,15 @@ class HistoryEvent(_message.Message): CONTINUEASNEW_FIELD_NUMBER: _ClassVar[int] EXECUTIONSUSPENDED_FIELD_NUMBER: _ClassVar[int] EXECUTIONRESUMED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONSIGNALED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONCALLED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONCOMPLETED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONFAILED_FIELD_NUMBER: _ClassVar[int] + ENTITYLOCKREQUESTED_FIELD_NUMBER: _ClassVar[int] + ENTITYLOCKGRANTED_FIELD_NUMBER: _ClassVar[int] + ENTITYUNLOCKSENT_FIELD_NUMBER: _ClassVar[int] + EXECUTIONSTALLED_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] eventId: int timestamp: _timestamp_pb2.Timestamp executionStarted: ExecutionStartedEvent @@ -322,35 +487,52 @@ class HistoryEvent(_message.Message): continueAsNew: ContinueAsNewEvent executionSuspended: ExecutionSuspendedEvent executionResumed: ExecutionResumedEvent - def __init__(self, eventId: _Optional[int] = ..., timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., 
executionStarted: _Optional[_Union[ExecutionStartedEvent, _Mapping]] = ..., executionCompleted: _Optional[_Union[ExecutionCompletedEvent, _Mapping]] = ..., executionTerminated: _Optional[_Union[ExecutionTerminatedEvent, _Mapping]] = ..., taskScheduled: _Optional[_Union[TaskScheduledEvent, _Mapping]] = ..., taskCompleted: _Optional[_Union[TaskCompletedEvent, _Mapping]] = ..., taskFailed: _Optional[_Union[TaskFailedEvent, _Mapping]] = ..., subOrchestrationInstanceCreated: _Optional[_Union[SubOrchestrationInstanceCreatedEvent, _Mapping]] = ..., subOrchestrationInstanceCompleted: _Optional[_Union[SubOrchestrationInstanceCompletedEvent, _Mapping]] = ..., subOrchestrationInstanceFailed: _Optional[_Union[SubOrchestrationInstanceFailedEvent, _Mapping]] = ..., timerCreated: _Optional[_Union[TimerCreatedEvent, _Mapping]] = ..., timerFired: _Optional[_Union[TimerFiredEvent, _Mapping]] = ..., orchestratorStarted: _Optional[_Union[OrchestratorStartedEvent, _Mapping]] = ..., orchestratorCompleted: _Optional[_Union[OrchestratorCompletedEvent, _Mapping]] = ..., eventSent: _Optional[_Union[EventSentEvent, _Mapping]] = ..., eventRaised: _Optional[_Union[EventRaisedEvent, _Mapping]] = ..., genericEvent: _Optional[_Union[GenericEvent, _Mapping]] = ..., historyState: _Optional[_Union[HistoryStateEvent, _Mapping]] = ..., continueAsNew: _Optional[_Union[ContinueAsNewEvent, _Mapping]] = ..., executionSuspended: _Optional[_Union[ExecutionSuspendedEvent, _Mapping]] = ..., executionResumed: _Optional[_Union[ExecutionResumedEvent, _Mapping]] = ...) -> None: ... + entityOperationSignaled: EntityOperationSignaledEvent + entityOperationCalled: EntityOperationCalledEvent + entityOperationCompleted: EntityOperationCompletedEvent + entityOperationFailed: EntityOperationFailedEvent + entityLockRequested: EntityLockRequestedEvent + entityLockGranted: EntityLockGrantedEvent + entityUnlockSent: EntityUnlockSentEvent + executionStalled: ExecutionStalledEvent + router: TaskRouter + def __init__(self, eventId: _Optional[int] = ..., timestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., executionStarted: _Optional[_Union[ExecutionStartedEvent, _Mapping]] = ..., executionCompleted: _Optional[_Union[ExecutionCompletedEvent, _Mapping]] = ..., executionTerminated: _Optional[_Union[ExecutionTerminatedEvent, _Mapping]] = ..., taskScheduled: _Optional[_Union[TaskScheduledEvent, _Mapping]] = ..., taskCompleted: _Optional[_Union[TaskCompletedEvent, _Mapping]] = ..., taskFailed: _Optional[_Union[TaskFailedEvent, _Mapping]] = ..., subOrchestrationInstanceCreated: _Optional[_Union[SubOrchestrationInstanceCreatedEvent, _Mapping]] = ..., subOrchestrationInstanceCompleted: _Optional[_Union[SubOrchestrationInstanceCompletedEvent, _Mapping]] = ..., subOrchestrationInstanceFailed: _Optional[_Union[SubOrchestrationInstanceFailedEvent, _Mapping]] = ..., timerCreated: _Optional[_Union[TimerCreatedEvent, _Mapping]] = ..., timerFired: _Optional[_Union[TimerFiredEvent, _Mapping]] = ..., orchestratorStarted: _Optional[_Union[OrchestratorStartedEvent, _Mapping]] = ..., orchestratorCompleted: _Optional[_Union[OrchestratorCompletedEvent, _Mapping]] = ..., eventSent: _Optional[_Union[EventSentEvent, _Mapping]] = ..., eventRaised: _Optional[_Union[EventRaisedEvent, _Mapping]] = ..., genericEvent: _Optional[_Union[GenericEvent, _Mapping]] = ..., historyState: _Optional[_Union[HistoryStateEvent, _Mapping]] = ..., continueAsNew: _Optional[_Union[ContinueAsNewEvent, _Mapping]] = ..., executionSuspended: 
_Optional[_Union[ExecutionSuspendedEvent, _Mapping]] = ..., executionResumed: _Optional[_Union[ExecutionResumedEvent, _Mapping]] = ..., entityOperationSignaled: _Optional[_Union[EntityOperationSignaledEvent, _Mapping]] = ..., entityOperationCalled: _Optional[_Union[EntityOperationCalledEvent, _Mapping]] = ..., entityOperationCompleted: _Optional[_Union[EntityOperationCompletedEvent, _Mapping]] = ..., entityOperationFailed: _Optional[_Union[EntityOperationFailedEvent, _Mapping]] = ..., entityLockRequested: _Optional[_Union[EntityLockRequestedEvent, _Mapping]] = ..., entityLockGranted: _Optional[_Union[EntityLockGrantedEvent, _Mapping]] = ..., entityUnlockSent: _Optional[_Union[EntityUnlockSentEvent, _Mapping]] = ..., executionStalled: _Optional[_Union[ExecutionStalledEvent, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... class ScheduleTaskAction(_message.Message): - __slots__ = ("name", "version", "input") + __slots__ = ("name", "version", "input", "router", "taskExecutionId") NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] + TASKEXECUTIONID_FIELD_NUMBER: _ClassVar[int] name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue - def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + router: TaskRouter + taskExecutionId: str + def __init__(self, name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ..., taskExecutionId: _Optional[str] = ...) -> None: ... class CreateSubOrchestrationAction(_message.Message): - __slots__ = ("instanceId", "name", "version", "input") + __slots__ = ("instanceId", "name", "version", "input", "router") INSTANCEID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] instanceId: str name: str version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + router: TaskRouter + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... class CreateTimerAction(_message.Message): - __slots__ = ("fireAt",) + __slots__ = ("fireAt", "name") FIREAT_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] fireAt: _timestamp_pb2.Timestamp - def __init__(self, fireAt: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + name: str + def __init__(self, fireAt: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., name: _Optional[str] = ...) -> None: ... 
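ScheduleTaskAction and CreateSubOrchestrationAction both gain a TaskRouter (source and target app IDs), and CreateTimerAction gains a name, presumably for diagnostics. A sketch of an orchestrator action that routes a task across apps; the task name, execution ID, and app IDs are all placeholders:

    from durabletask.internal import orchestrator_service_pb2 as pb

    action = pb.OrchestratorAction(
        id=1,
        scheduleTask=pb.ScheduleTaskAction(
            name="ProcessPayment",
            taskExecutionId="exec-001",
            router=pb.TaskRouter(sourceAppID="app-a", targetAppID="app-b"),
        ),
    )
    assert action.HasField("scheduleTask")

    # Timers can now carry a label as well.
    timer = pb.CreateTimerAction(name="retry-backoff")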
class SendEventAction(_message.Message): __slots__ = ("instance", "name", "data") @@ -388,8 +570,24 @@ class TerminateOrchestrationAction(_message.Message): recurse: bool def __init__(self, instanceId: _Optional[str] = ..., reason: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., recurse: bool = ...) -> None: ... +class SendEntityMessageAction(_message.Message): + __slots__ = ("entityOperationSignaled", "entityOperationCalled", "entityLockRequested", "entityUnlockSent") + ENTITYOPERATIONSIGNALED_FIELD_NUMBER: _ClassVar[int] + ENTITYOPERATIONCALLED_FIELD_NUMBER: _ClassVar[int] + ENTITYLOCKREQUESTED_FIELD_NUMBER: _ClassVar[int] + ENTITYUNLOCKSENT_FIELD_NUMBER: _ClassVar[int] + entityOperationSignaled: EntityOperationSignaledEvent + entityOperationCalled: EntityOperationCalledEvent + entityLockRequested: EntityLockRequestedEvent + entityUnlockSent: EntityUnlockSentEvent + def __init__(self, entityOperationSignaled: _Optional[_Union[EntityOperationSignaledEvent, _Mapping]] = ..., entityOperationCalled: _Optional[_Union[EntityOperationCalledEvent, _Mapping]] = ..., entityLockRequested: _Optional[_Union[EntityLockRequestedEvent, _Mapping]] = ..., entityUnlockSent: _Optional[_Union[EntityUnlockSentEvent, _Mapping]] = ...) -> None: ... + +class OrchestratorVersionNotAvailableAction(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + class OrchestratorAction(_message.Message): - __slots__ = ("id", "scheduleTask", "createSubOrchestration", "createTimer", "sendEvent", "completeOrchestration", "terminateOrchestration") + __slots__ = ("id", "scheduleTask", "createSubOrchestration", "createTimer", "sendEvent", "completeOrchestration", "terminateOrchestration", "sendEntityMessage", "orchestratorVersionNotAvailable", "router") ID_FIELD_NUMBER: _ClassVar[int] SCHEDULETASK_FIELD_NUMBER: _ClassVar[int] CREATESUBORCHESTRATION_FIELD_NUMBER: _ClassVar[int] @@ -397,6 +595,9 @@ class OrchestratorAction(_message.Message): SENDEVENT_FIELD_NUMBER: _ClassVar[int] COMPLETEORCHESTRATION_FIELD_NUMBER: _ClassVar[int] TERMINATEORCHESTRATION_FIELD_NUMBER: _ClassVar[int] + SENDENTITYMESSAGE_FIELD_NUMBER: _ClassVar[int] + ORCHESTRATORVERSIONNOTAVAILABLE_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] id: int scheduleTask: ScheduleTaskAction createSubOrchestration: CreateSubOrchestrationAction @@ -404,34 +605,47 @@ class OrchestratorAction(_message.Message): sendEvent: SendEventAction completeOrchestration: CompleteOrchestrationAction terminateOrchestration: TerminateOrchestrationAction - def __init__(self, id: _Optional[int] = ..., scheduleTask: _Optional[_Union[ScheduleTaskAction, _Mapping]] = ..., createSubOrchestration: _Optional[_Union[CreateSubOrchestrationAction, _Mapping]] = ..., createTimer: _Optional[_Union[CreateTimerAction, _Mapping]] = ..., sendEvent: _Optional[_Union[SendEventAction, _Mapping]] = ..., completeOrchestration: _Optional[_Union[CompleteOrchestrationAction, _Mapping]] = ..., terminateOrchestration: _Optional[_Union[TerminateOrchestrationAction, _Mapping]] = ...) -> None: ... 
+ sendEntityMessage: SendEntityMessageAction + orchestratorVersionNotAvailable: OrchestratorVersionNotAvailableAction + router: TaskRouter + def __init__(self, id: _Optional[int] = ..., scheduleTask: _Optional[_Union[ScheduleTaskAction, _Mapping]] = ..., createSubOrchestration: _Optional[_Union[CreateSubOrchestrationAction, _Mapping]] = ..., createTimer: _Optional[_Union[CreateTimerAction, _Mapping]] = ..., sendEvent: _Optional[_Union[SendEventAction, _Mapping]] = ..., completeOrchestration: _Optional[_Union[CompleteOrchestrationAction, _Mapping]] = ..., terminateOrchestration: _Optional[_Union[TerminateOrchestrationAction, _Mapping]] = ..., sendEntityMessage: _Optional[_Union[SendEntityMessageAction, _Mapping]] = ..., orchestratorVersionNotAvailable: _Optional[_Union[OrchestratorVersionNotAvailableAction, _Mapping]] = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... class OrchestratorRequest(_message.Message): - __slots__ = ("instanceId", "executionId", "pastEvents", "newEvents", "entityParameters") + __slots__ = ("instanceId", "executionId", "pastEvents", "newEvents", "entityParameters", "requiresHistoryStreaming", "router") INSTANCEID_FIELD_NUMBER: _ClassVar[int] EXECUTIONID_FIELD_NUMBER: _ClassVar[int] PASTEVENTS_FIELD_NUMBER: _ClassVar[int] NEWEVENTS_FIELD_NUMBER: _ClassVar[int] ENTITYPARAMETERS_FIELD_NUMBER: _ClassVar[int] + REQUIRESHISTORYSTREAMING_FIELD_NUMBER: _ClassVar[int] + ROUTER_FIELD_NUMBER: _ClassVar[int] instanceId: str executionId: _wrappers_pb2.StringValue pastEvents: _containers.RepeatedCompositeFieldContainer[HistoryEvent] newEvents: _containers.RepeatedCompositeFieldContainer[HistoryEvent] entityParameters: OrchestratorEntityParameters - def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ...) -> None: ... + requiresHistoryStreaming: bool + router: TaskRouter + def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., pastEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., newEvents: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ..., entityParameters: _Optional[_Union[OrchestratorEntityParameters, _Mapping]] = ..., requiresHistoryStreaming: bool = ..., router: _Optional[_Union[TaskRouter, _Mapping]] = ...) -> None: ... class OrchestratorResponse(_message.Message): - __slots__ = ("instanceId", "actions", "customStatus") + __slots__ = ("instanceId", "actions", "customStatus", "completionToken", "numEventsProcessed", "version") INSTANCEID_FIELD_NUMBER: _ClassVar[int] ACTIONS_FIELD_NUMBER: _ClassVar[int] CUSTOMSTATUS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + NUMEVENTSPROCESSED_FIELD_NUMBER: _ClassVar[int] + VERSION_FIELD_NUMBER: _ClassVar[int] instanceId: str actions: _containers.RepeatedCompositeFieldContainer[OrchestratorAction] customStatus: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
+ completionToken: str + numEventsProcessed: _wrappers_pb2.Int32Value + version: OrchestrationVersion + def __init__(self, instanceId: _Optional[str] = ..., actions: _Optional[_Iterable[_Union[OrchestratorAction, _Mapping]]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completionToken: _Optional[str] = ..., numEventsProcessed: _Optional[_Union[_wrappers_pb2.Int32Value, _Mapping]] = ..., version: _Optional[_Union[OrchestrationVersion, _Mapping]] = ...) -> None: ... class CreateInstanceRequest(_message.Message): - __slots__ = ("instanceId", "name", "version", "input", "scheduledStartTimestamp", "orchestrationIdReusePolicy", "executionId", "tags") + __slots__ = ("instanceId", "name", "version", "input", "scheduledStartTimestamp", "orchestrationIdReusePolicy", "executionId", "tags", "parentTraceContext") class TagsEntry(_message.Message): __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] @@ -447,6 +661,7 @@ class CreateInstanceRequest(_message.Message): ORCHESTRATIONIDREUSEPOLICY_FIELD_NUMBER: _ClassVar[int] EXECUTIONID_FIELD_NUMBER: _ClassVar[int] TAGS_FIELD_NUMBER: _ClassVar[int] + PARENTTRACECONTEXT_FIELD_NUMBER: _ClassVar[int] instanceId: str name: str version: _wrappers_pb2.StringValue @@ -455,7 +670,8 @@ class CreateInstanceRequest(_message.Message): orchestrationIdReusePolicy: OrchestrationIdReusePolicy executionId: _wrappers_pb2.StringValue tags: _containers.ScalarMap[str, str] - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledStartTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., orchestrationIdReusePolicy: _Optional[_Union[OrchestrationIdReusePolicy, _Mapping]] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., tags: _Optional[_Mapping[str, str]] = ...) -> None: ... + parentTraceContext: TraceContext + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledStartTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., orchestrationIdReusePolicy: _Optional[_Union[OrchestrationIdReusePolicy, _Mapping]] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., tags: _Optional[_Mapping[str, str]] = ..., parentTraceContext: _Optional[_Union[TraceContext, _Mapping]] = ...) -> None: ... class OrchestrationIdReusePolicy(_message.Message): __slots__ = ("operationStatus", "action") @@ -500,7 +716,14 @@ class RewindInstanceResponse(_message.Message): def __init__(self) -> None: ... 
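A sketch of the new parentTraceContext field on CreateInstanceRequest, which lets a client propagate a W3C trace context when scheduling an instance. The traceParent string below is a hypothetical placeholder, and the field name is assumed from the TraceContext message defined earlier in the generated module:

from durabletask.internal import orchestrator_service_pb2 as pb

req = pb.CreateInstanceRequest(
    instanceId="abc123",
    name="my_orchestrator",
    # Hypothetical W3C traceparent value, for illustration only.
    parentTraceContext=pb.TraceContext(
        traceParent="00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"),
)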
class OrchestrationState(_message.Message): - __slots__ = ("instanceId", "name", "version", "orchestrationStatus", "scheduledStartTimestamp", "createdTimestamp", "lastUpdatedTimestamp", "input", "output", "customStatus", "failureDetails", "executionId", "completedTimestamp", "parentInstanceId") + __slots__ = ("instanceId", "name", "version", "orchestrationStatus", "scheduledStartTimestamp", "createdTimestamp", "lastUpdatedTimestamp", "input", "output", "customStatus", "failureDetails", "executionId", "completedTimestamp", "parentInstanceId", "tags") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... INSTANCEID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] @@ -515,6 +738,7 @@ class OrchestrationState(_message.Message): EXECUTIONID_FIELD_NUMBER: _ClassVar[int] COMPLETEDTIMESTAMP_FIELD_NUMBER: _ClassVar[int] PARENTINSTANCEID_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] instanceId: str name: str version: _wrappers_pb2.StringValue @@ -529,7 +753,8 @@ class OrchestrationState(_message.Message): executionId: _wrappers_pb2.StringValue completedTimestamp: _timestamp_pb2.Timestamp parentInstanceId: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationStatus: _Optional[_Union[OrchestrationStatus, str]] = ..., scheduledStartTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., lastUpdatedTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., output: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completedTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
+ tags: _containers.ScalarMap[str, str] + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., orchestrationStatus: _Optional[_Union[OrchestrationStatus, str]] = ..., scheduledStartTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., lastUpdatedTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., output: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., customStatus: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., completedTimestamp: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., parentInstanceId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., tags: _Optional[_Mapping[str, str]] = ...) -> None: ... class RaiseEventRequest(_message.Message): __slots__ = ("instanceId", "name", "input") @@ -607,7 +832,7 @@ class InstanceQuery(_message.Message): continuationToken: _wrappers_pb2.StringValue instanceIdPrefix: _wrappers_pb2.StringValue fetchInputsAndOutputs: bool - def __init__(self, runtimeStatus: _Optional[_Iterable[_Union[OrchestrationStatus, str]]] = ..., createdTimeFrom: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimeTo: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., taskHubNames: _Optional[_Iterable[_Union[_wrappers_pb2.StringValue, _Mapping]]] = ..., maxInstanceCount: _Optional[int] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., instanceIdPrefix: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., fetchInputsAndOutputs: bool = ...) -> None: ... + def __init__(self, runtimeStatus: _Optional[_Iterable[_Union[OrchestrationStatus, str]]] = ..., createdTimeFrom: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimeTo: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., taskHubNames: _Optional[_Iterable[_Union[_wrappers_pb2.StringValue, _Mapping]]] = ..., maxInstanceCount: _Optional[int] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., instanceIdPrefix: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., fetchInputsAndOutputs: bool = ...) -> None: ... class QueryInstancesResponse(_message.Message): __slots__ = ("orchestrationState", "continuationToken") @@ -618,14 +843,16 @@ class QueryInstancesResponse(_message.Message): def __init__(self, orchestrationState: _Optional[_Iterable[_Union[OrchestrationState, _Mapping]]] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... 
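A sketch of the new tags map on OrchestrationState; protobuf map fields accept a plain dict at construction time and expose dict-style access:

from durabletask.internal import orchestrator_service_pb2 as pb

state = pb.OrchestrationState(instanceId="abc123", tags={"tenant": "contoso"})
assert state.tags["tenant"] == "contoso"
print(dict(state.tags))  # copy the map out as a plain dict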
class PurgeInstancesRequest(_message.Message): - __slots__ = ("instanceId", "purgeInstanceFilter", "recursive") + __slots__ = ("instanceId", "purgeInstanceFilter", "recursive", "force") INSTANCEID_FIELD_NUMBER: _ClassVar[int] PURGEINSTANCEFILTER_FIELD_NUMBER: _ClassVar[int] RECURSIVE_FIELD_NUMBER: _ClassVar[int] + FORCE_FIELD_NUMBER: _ClassVar[int] instanceId: str purgeInstanceFilter: PurgeInstanceFilter recursive: bool - def __init__(self, instanceId: _Optional[str] = ..., purgeInstanceFilter: _Optional[_Union[PurgeInstanceFilter, _Mapping]] = ..., recursive: bool = ...) -> None: ... + force: bool + def __init__(self, instanceId: _Optional[str] = ..., purgeInstanceFilter: _Optional[_Union[PurgeInstanceFilter, _Mapping]] = ..., recursive: bool = ..., force: bool = ...) -> None: ... class PurgeInstanceFilter(_message.Message): __slots__ = ("createdTimeFrom", "createdTimeTo", "runtimeStatus") @@ -635,13 +862,15 @@ class PurgeInstanceFilter(_message.Message): createdTimeFrom: _timestamp_pb2.Timestamp createdTimeTo: _timestamp_pb2.Timestamp runtimeStatus: _containers.RepeatedScalarFieldContainer[OrchestrationStatus] - def __init__(self, createdTimeFrom: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimeTo: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., runtimeStatus: _Optional[_Iterable[_Union[OrchestrationStatus, str]]] = ...) -> None: ... + def __init__(self, createdTimeFrom: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., createdTimeTo: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., runtimeStatus: _Optional[_Iterable[_Union[OrchestrationStatus, str]]] = ...) -> None: ... class PurgeInstancesResponse(_message.Message): - __slots__ = ("deletedInstanceCount",) + __slots__ = ("deletedInstanceCount", "isComplete") DELETEDINSTANCECOUNT_FIELD_NUMBER: _ClassVar[int] + ISCOMPLETE_FIELD_NUMBER: _ClassVar[int] deletedInstanceCount: int - def __init__(self, deletedInstanceCount: _Optional[int] = ...) -> None: ... + isComplete: _wrappers_pb2.BoolValue + def __init__(self, deletedInstanceCount: _Optional[int] = ..., isComplete: _Optional[_Union[_wrappers_pb2.BoolValue, _Mapping]] = ...) -> None: ... class CreateTaskHubRequest(_message.Message): __slots__ = ("recreateIfExists",) @@ -673,7 +902,7 @@ class SignalEntityRequest(_message.Message): input: _wrappers_pb2.StringValue requestId: str scheduledTime: _timestamp_pb2.Timestamp - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., requestId: _Optional[str] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., requestId: _Optional[str] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... 
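A sketch of the new force flag on PurgeInstancesRequest and the isComplete wrapper on the response. Because isComplete is a google.protobuf.BoolValue, check presence before reading it:

from durabletask.internal import orchestrator_service_pb2 as pb

req = pb.PurgeInstancesRequest(instanceId="abc123", recursive=True, force=True)

resp = pb.PurgeInstancesResponse(deletedInstanceCount=1)
if resp.HasField("isComplete"):  # wrapper (message-typed) fields support presence checks
    print(resp.isComplete.value)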
class SignalEntityResponse(_message.Message): __slots__ = () @@ -711,7 +940,7 @@ class EntityQuery(_message.Message): includeTransient: bool pageSize: _wrappers_pb2.Int32Value continuationToken: _wrappers_pb2.StringValue - def __init__(self, instanceIdStartsWith: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., lastModifiedFrom: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., lastModifiedTo: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., includeState: bool = ..., includeTransient: bool = ..., pageSize: _Optional[_Union[_wrappers_pb2.Int32Value, _Mapping]] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + def __init__(self, instanceIdStartsWith: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., lastModifiedFrom: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., lastModifiedTo: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., includeState: bool = ..., includeTransient: bool = ..., pageSize: _Optional[_Union[_wrappers_pb2.Int32Value, _Mapping]] = ..., continuationToken: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... class QueryEntitiesRequest(_message.Message): __slots__ = ("query",) @@ -739,7 +968,7 @@ class EntityMetadata(_message.Message): backlogQueueSize: int lockedBy: _wrappers_pb2.StringValue serializedState: _wrappers_pb2.StringValue - def __init__(self, instanceId: _Optional[str] = ..., lastModifiedTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., backlogQueueSize: _Optional[int] = ..., lockedBy: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., serializedState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... + def __init__(self, instanceId: _Optional[str] = ..., lastModifiedTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ..., backlogQueueSize: _Optional[int] = ..., lockedBy: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., serializedState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ...) -> None: ... class CleanEntityStorageRequest(_message.Message): __slots__ = ("continuationToken", "removeEmptyEntities", "releaseOrphanedLocks") @@ -765,7 +994,7 @@ class OrchestratorEntityParameters(_message.Message): __slots__ = ("entityMessageReorderWindow",) ENTITYMESSAGEREORDERWINDOW_FIELD_NUMBER: _ClassVar[int] entityMessageReorderWindow: _duration_pb2.Duration - def __init__(self, entityMessageReorderWindow: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ...) -> None: ... + def __init__(self, entityMessageReorderWindow: _Optional[_Union[datetime.timedelta, _duration_pb2.Duration, _Mapping]] = ...) -> None: ... class EntityBatchRequest(_message.Message): __slots__ = ("instanceId", "entityState", "operations") @@ -778,16 +1007,32 @@ class EntityBatchRequest(_message.Message): def __init__(self, instanceId: _Optional[str] = ..., entityState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., operations: _Optional[_Iterable[_Union[OperationRequest, _Mapping]]] = ...) -> None: ... 
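A sketch showing that Duration-typed fields such as entityMessageReorderWindow are now hinted to accept datetime.timedelta; constructing the Duration explicitly via FromTimedelta works on any protobuf version:

from datetime import timedelta

from google.protobuf import duration_pb2

from durabletask.internal import orchestrator_service_pb2 as pb

window = duration_pb2.Duration()
window.FromTimedelta(timedelta(seconds=30))
params = pb.OrchestratorEntityParameters(entityMessageReorderWindow=window)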
class EntityBatchResult(_message.Message): - __slots__ = ("results", "actions", "entityState", "failureDetails") + __slots__ = ("results", "actions", "entityState", "failureDetails", "completionToken", "operationInfos") RESULTS_FIELD_NUMBER: _ClassVar[int] ACTIONS_FIELD_NUMBER: _ClassVar[int] ENTITYSTATE_FIELD_NUMBER: _ClassVar[int] FAILUREDETAILS_FIELD_NUMBER: _ClassVar[int] + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + OPERATIONINFOS_FIELD_NUMBER: _ClassVar[int] results: _containers.RepeatedCompositeFieldContainer[OperationResult] actions: _containers.RepeatedCompositeFieldContainer[OperationAction] entityState: _wrappers_pb2.StringValue failureDetails: TaskFailureDetails - def __init__(self, results: _Optional[_Iterable[_Union[OperationResult, _Mapping]]] = ..., actions: _Optional[_Iterable[_Union[OperationAction, _Mapping]]] = ..., entityState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ...) -> None: ... + completionToken: str + operationInfos: _containers.RepeatedCompositeFieldContainer[OperationInfo] + def __init__(self, results: _Optional[_Iterable[_Union[OperationResult, _Mapping]]] = ..., actions: _Optional[_Iterable[_Union[OperationAction, _Mapping]]] = ..., entityState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., failureDetails: _Optional[_Union[TaskFailureDetails, _Mapping]] = ..., completionToken: _Optional[str] = ..., operationInfos: _Optional[_Iterable[_Union[OperationInfo, _Mapping]]] = ...) -> None: ... + +class EntityRequest(_message.Message): + __slots__ = ("instanceId", "executionId", "entityState", "operationRequests") + INSTANCEID_FIELD_NUMBER: _ClassVar[int] + EXECUTIONID_FIELD_NUMBER: _ClassVar[int] + ENTITYSTATE_FIELD_NUMBER: _ClassVar[int] + OPERATIONREQUESTS_FIELD_NUMBER: _ClassVar[int] + instanceId: str + executionId: str + entityState: _wrappers_pb2.StringValue + operationRequests: _containers.RepeatedCompositeFieldContainer[HistoryEvent] + def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[str] = ..., entityState: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., operationRequests: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ...) -> None: ... class OperationRequest(_message.Message): __slots__ = ("operation", "requestId", "input") @@ -807,6 +1052,14 @@ class OperationResult(_message.Message): failure: OperationResultFailure def __init__(self, success: _Optional[_Union[OperationResultSuccess, _Mapping]] = ..., failure: _Optional[_Union[OperationResultFailure, _Mapping]] = ...) -> None: ... +class OperationInfo(_message.Message): + __slots__ = ("requestId", "responseDestination") + REQUESTID_FIELD_NUMBER: _ClassVar[int] + RESPONSEDESTINATION_FIELD_NUMBER: _ClassVar[int] + requestId: str + responseDestination: OrchestrationInstance + def __init__(self, requestId: _Optional[str] = ..., responseDestination: _Optional[_Union[OrchestrationInstance, _Mapping]] = ...) -> None: ... + class OperationResultSuccess(_message.Message): __slots__ = ("result",) RESULT_FIELD_NUMBER: _ClassVar[int] @@ -839,7 +1092,7 @@ class SendSignalAction(_message.Message): name: str input: _wrappers_pb2.StringValue scheduledTime: _timestamp_pb2.Timestamp - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... 
+ def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... class StartNewOrchestrationAction(_message.Message): __slots__ = ("instanceId", "name", "version", "input", "scheduledTime") @@ -853,25 +1106,65 @@ class StartNewOrchestrationAction(_message.Message): version: _wrappers_pb2.StringValue input: _wrappers_pb2.StringValue scheduledTime: _timestamp_pb2.Timestamp - def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + def __init__(self, instanceId: _Optional[str] = ..., name: _Optional[str] = ..., version: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., scheduledTime: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... -class GetWorkItemsRequest(_message.Message): +class AbandonActivityTaskRequest(_message.Message): + __slots__ = ("completionToken",) + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + completionToken: str + def __init__(self, completionToken: _Optional[str] = ...) -> None: ... + +class AbandonActivityTaskResponse(_message.Message): __slots__ = () def __init__(self) -> None: ... +class AbandonOrchestrationTaskRequest(_message.Message): + __slots__ = ("completionToken",) + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + completionToken: str + def __init__(self, completionToken: _Optional[str] = ...) -> None: ... + +class AbandonOrchestrationTaskResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AbandonEntityTaskRequest(_message.Message): + __slots__ = ("completionToken",) + COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] + completionToken: str + def __init__(self, completionToken: _Optional[str] = ...) -> None: ... + +class AbandonEntityTaskResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class GetWorkItemsRequest(_message.Message): + __slots__ = ("maxConcurrentOrchestrationWorkItems", "maxConcurrentActivityWorkItems", "maxConcurrentEntityWorkItems", "capabilities") + MAXCONCURRENTORCHESTRATIONWORKITEMS_FIELD_NUMBER: _ClassVar[int] + MAXCONCURRENTACTIVITYWORKITEMS_FIELD_NUMBER: _ClassVar[int] + MAXCONCURRENTENTITYWORKITEMS_FIELD_NUMBER: _ClassVar[int] + CAPABILITIES_FIELD_NUMBER: _ClassVar[int] + maxConcurrentOrchestrationWorkItems: int + maxConcurrentActivityWorkItems: int + maxConcurrentEntityWorkItems: int + capabilities: _containers.RepeatedScalarFieldContainer[WorkerCapability] + def __init__(self, maxConcurrentOrchestrationWorkItems: _Optional[int] = ..., maxConcurrentActivityWorkItems: _Optional[int] = ..., maxConcurrentEntityWorkItems: _Optional[int] = ..., capabilities: _Optional[_Iterable[_Union[WorkerCapability, str]]] = ...) -> None: ... 
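A sketch of the expanded GetWorkItemsRequest: a worker can now advertise per-kind concurrency limits when it opens the work-item stream. The numbers are arbitrary, and capabilities is omitted because the WorkerCapability enum is defined elsewhere in the module:

from durabletask.internal import orchestrator_service_pb2 as pb

req = pb.GetWorkItemsRequest(
    maxConcurrentOrchestrationWorkItems=10,
    maxConcurrentActivityWorkItems=20,
    maxConcurrentEntityWorkItems=5,
)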
+ class WorkItem(_message.Message): - __slots__ = ("orchestratorRequest", "activityRequest", "entityRequest", "healthPing", "completionToken") + __slots__ = ("orchestratorRequest", "activityRequest", "entityRequest", "healthPing", "entityRequestV2", "completionToken") ORCHESTRATORREQUEST_FIELD_NUMBER: _ClassVar[int] ACTIVITYREQUEST_FIELD_NUMBER: _ClassVar[int] ENTITYREQUEST_FIELD_NUMBER: _ClassVar[int] HEALTHPING_FIELD_NUMBER: _ClassVar[int] + ENTITYREQUESTV2_FIELD_NUMBER: _ClassVar[int] COMPLETIONTOKEN_FIELD_NUMBER: _ClassVar[int] orchestratorRequest: OrchestratorRequest activityRequest: ActivityRequest entityRequest: EntityBatchRequest healthPing: HealthPing + entityRequestV2: EntityRequest completionToken: str - def __init__(self, orchestratorRequest: _Optional[_Union[OrchestratorRequest, _Mapping]] = ..., activityRequest: _Optional[_Union[ActivityRequest, _Mapping]] = ..., entityRequest: _Optional[_Union[EntityBatchRequest, _Mapping]] = ..., healthPing: _Optional[_Union[HealthPing, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... + def __init__(self, orchestratorRequest: _Optional[_Union[OrchestratorRequest, _Mapping]] = ..., activityRequest: _Optional[_Union[ActivityRequest, _Mapping]] = ..., entityRequest: _Optional[_Union[EntityBatchRequest, _Mapping]] = ..., healthPing: _Optional[_Union[HealthPing, _Mapping]] = ..., entityRequestV2: _Optional[_Union[EntityRequest, _Mapping]] = ..., completionToken: _Optional[str] = ...) -> None: ... class CompleteTaskResponse(_message.Message): __slots__ = () @@ -880,3 +1173,69 @@ class CompleteTaskResponse(_message.Message): class HealthPing(_message.Message): __slots__ = () def __init__(self) -> None: ... + +class StreamInstanceHistoryRequest(_message.Message): + __slots__ = ("instanceId", "executionId", "forWorkItemProcessing") + INSTANCEID_FIELD_NUMBER: _ClassVar[int] + EXECUTIONID_FIELD_NUMBER: _ClassVar[int] + FORWORKITEMPROCESSING_FIELD_NUMBER: _ClassVar[int] + instanceId: str + executionId: _wrappers_pb2.StringValue + forWorkItemProcessing: bool + def __init__(self, instanceId: _Optional[str] = ..., executionId: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., forWorkItemProcessing: bool = ...) -> None: ... + +class HistoryChunk(_message.Message): + __slots__ = ("events",) + EVENTS_FIELD_NUMBER: _ClassVar[int] + events: _containers.RepeatedCompositeFieldContainer[HistoryEvent] + def __init__(self, events: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ...) -> None: ... + +class RerunWorkflowFromEventRequest(_message.Message): + __slots__ = ("sourceInstanceID", "eventID", "newInstanceID", "input", "overwriteInput", "newChildWorkflowInstanceID") + SOURCEINSTANCEID_FIELD_NUMBER: _ClassVar[int] + EVENTID_FIELD_NUMBER: _ClassVar[int] + NEWINSTANCEID_FIELD_NUMBER: _ClassVar[int] + INPUT_FIELD_NUMBER: _ClassVar[int] + OVERWRITEINPUT_FIELD_NUMBER: _ClassVar[int] + NEWCHILDWORKFLOWINSTANCEID_FIELD_NUMBER: _ClassVar[int] + sourceInstanceID: str + eventID: int + newInstanceID: str + input: _wrappers_pb2.StringValue + overwriteInput: bool + newChildWorkflowInstanceID: str + def __init__(self, sourceInstanceID: _Optional[str] = ..., eventID: _Optional[int] = ..., newInstanceID: _Optional[str] = ..., input: _Optional[_Union[_wrappers_pb2.StringValue, _Mapping]] = ..., overwriteInput: bool = ..., newChildWorkflowInstanceID: _Optional[str] = ...) -> None: ... 
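A sketch of building a RerunWorkflowFromEventRequest; input is a StringValue wrapper, and overwriteInput controls whether it replaces the original event's input. The instance IDs are illustrative:

from google.protobuf import wrappers_pb2

from durabletask.internal import orchestrator_service_pb2 as pb

req = pb.RerunWorkflowFromEventRequest(
    sourceInstanceID="abc123",
    eventID=4,
    newInstanceID="abc123-rerun",
    input=wrappers_pb2.StringValue(value='{"retry": true}'),
    overwriteInput=True,
)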
+ +class RerunWorkflowFromEventResponse(_message.Message): + __slots__ = ("newInstanceID",) + NEWINSTANCEID_FIELD_NUMBER: _ClassVar[int] + newInstanceID: str + def __init__(self, newInstanceID: _Optional[str] = ...) -> None: ... + +class ListInstanceIDsRequest(_message.Message): + __slots__ = ("continuationToken", "pageSize") + CONTINUATIONTOKEN_FIELD_NUMBER: _ClassVar[int] + PAGESIZE_FIELD_NUMBER: _ClassVar[int] + continuationToken: str + pageSize: int + def __init__(self, continuationToken: _Optional[str] = ..., pageSize: _Optional[int] = ...) -> None: ... + +class ListInstanceIDsResponse(_message.Message): + __slots__ = ("instanceIds", "continuationToken") + INSTANCEIDS_FIELD_NUMBER: _ClassVar[int] + CONTINUATIONTOKEN_FIELD_NUMBER: _ClassVar[int] + instanceIds: _containers.RepeatedScalarFieldContainer[str] + continuationToken: str + def __init__(self, instanceIds: _Optional[_Iterable[str]] = ..., continuationToken: _Optional[str] = ...) -> None: ... + +class GetInstanceHistoryRequest(_message.Message): + __slots__ = ("instanceId",) + INSTANCEID_FIELD_NUMBER: _ClassVar[int] + instanceId: str + def __init__(self, instanceId: _Optional[str] = ...) -> None: ... + +class GetInstanceHistoryResponse(_message.Message): + __slots__ = ("events",) + EVENTS_FIELD_NUMBER: _ClassVar[int] + events: _containers.RepeatedCompositeFieldContainer[HistoryEvent] + def __init__(self, events: _Optional[_Iterable[_Union[HistoryEvent, _Mapping]]] = ...) -> None: ... diff --git a/durabletask/internal/orchestrator_service_pb2_grpc.py b/durabletask/internal/orchestrator_service_pb2_grpc.py index f11cf4bb..eaca21c7 100644 --- a/durabletask/internal/orchestrator_service_pb2_grpc.py +++ b/durabletask/internal/orchestrator_service_pb2_grpc.py @@ -3,12 +3,10 @@ import grpc import warnings +from durabletask.internal import orchestrator_service_pb2 as durabletask_dot_internal_dot_orchestrator__service__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -# TODO: This is a manual edit. Need to figure out how to not manually edit this file. -import durabletask.internal.orchestrator_service_pb2 as orchestrator__service__pb2 - -GRPC_GENERATED_VERSION = '1.67.0' +GRPC_GENERATED_VERSION = '1.75.1' GRPC_VERSION = grpc.__version__ _version_not_supported = False @@ -21,7 +19,7 @@ if _version_not_supported: raise RuntimeError( f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in orchestrator_service_pb2_grpc.py depends on' + + f' but the generated code in durabletask/internal/orchestrator_service_pb2_grpc.py depends on' + f' grpcio>={GRPC_GENERATED_VERSION}.' + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
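A sketch of paging through instance IDs with the new ListInstanceIDs messages, assuming a sidecar on localhost:4001 and that an empty continuation token marks the last page:

import grpc

from durabletask.internal import orchestrator_service_pb2 as pb
from durabletask.internal import orchestrator_service_pb2_grpc as pb_grpc

with grpc.insecure_channel("localhost:4001") as channel:
    stub = pb_grpc.TaskHubSidecarServiceStub(channel)
    token = ""
    while True:
        resp = stub.ListInstanceIDs(
            pb.ListInstanceIDsRequest(continuationToken=token, pageSize=100))
        for instance_id in resp.instanceIds:
            print(instance_id)
        if not resp.continuationToken:  # assumed: empty token means no more pages
            break
        token = resp.continuationToken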
@@ -44,108 +42,143 @@ def __init__(self, channel): _registered_method=True) self.StartInstance = channel.unary_unary( '/TaskHubSidecarService/StartInstance', - request_serializer=orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CreateInstanceResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, _registered_method=True) self.GetInstance = channel.unary_unary( '/TaskHubSidecarService/GetInstance', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, _registered_method=True) self.RewindInstance = channel.unary_unary( '/TaskHubSidecarService/RewindInstance', - request_serializer=orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.RewindInstanceResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, _registered_method=True) self.WaitForInstanceStart = channel.unary_unary( '/TaskHubSidecarService/WaitForInstanceStart', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, _registered_method=True) self.WaitForInstanceCompletion = channel.unary_unary( '/TaskHubSidecarService/WaitForInstanceCompletion', - request_serializer=orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetInstanceResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, _registered_method=True) self.RaiseEvent = channel.unary_unary( '/TaskHubSidecarService/RaiseEvent', - request_serializer=orchestrator__service__pb2.RaiseEventRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.RaiseEventResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, _registered_method=True) self.TerminateInstance = channel.unary_unary( '/TaskHubSidecarService/TerminateInstance', - request_serializer=orchestrator__service__pb2.TerminateRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.TerminateResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, + 
response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, _registered_method=True) self.SuspendInstance = channel.unary_unary( '/TaskHubSidecarService/SuspendInstance', - request_serializer=orchestrator__service__pb2.SuspendRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.SuspendResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, _registered_method=True) self.ResumeInstance = channel.unary_unary( '/TaskHubSidecarService/ResumeInstance', - request_serializer=orchestrator__service__pb2.ResumeRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.ResumeResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, _registered_method=True) self.QueryInstances = channel.unary_unary( '/TaskHubSidecarService/QueryInstances', - request_serializer=orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.QueryInstancesResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, _registered_method=True) self.PurgeInstances = channel.unary_unary( '/TaskHubSidecarService/PurgeInstances', - request_serializer=orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.PurgeInstancesResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, _registered_method=True) self.GetWorkItems = channel.unary_stream( '/TaskHubSidecarService/GetWorkItems', - request_serializer=orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.WorkItem.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, _registered_method=True) self.CompleteActivityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteActivityTask', - request_serializer=orchestrator__service__pb2.ActivityResponse.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, _registered_method=True) self.CompleteOrchestratorTask = channel.unary_unary( '/TaskHubSidecarService/CompleteOrchestratorTask', - request_serializer=orchestrator__service__pb2.OrchestratorResponse.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, + 
response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, _registered_method=True) self.CompleteEntityTask = channel.unary_unary( '/TaskHubSidecarService/CompleteEntityTask', - request_serializer=orchestrator__service__pb2.EntityBatchResult.SerializeToString, - response_deserializer=orchestrator__service__pb2.CompleteTaskResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + _registered_method=True) + self.StreamInstanceHistory = channel.unary_stream( + '/TaskHubSidecarService/StreamInstanceHistory', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.StreamInstanceHistoryRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.HistoryChunk.FromString, _registered_method=True) self.CreateTaskHub = channel.unary_unary( '/TaskHubSidecarService/CreateTaskHub', - request_serializer=orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CreateTaskHubResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, _registered_method=True) self.DeleteTaskHub = channel.unary_unary( '/TaskHubSidecarService/DeleteTaskHub', - request_serializer=orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.DeleteTaskHubResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, _registered_method=True) self.SignalEntity = channel.unary_unary( '/TaskHubSidecarService/SignalEntity', - request_serializer=orchestrator__service__pb2.SignalEntityRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.SignalEntityResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, _registered_method=True) self.GetEntity = channel.unary_unary( '/TaskHubSidecarService/GetEntity', - request_serializer=orchestrator__service__pb2.GetEntityRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.GetEntityResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, _registered_method=True) self.QueryEntities = channel.unary_unary( '/TaskHubSidecarService/QueryEntities', - request_serializer=orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.QueryEntitiesResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, _registered_method=True) 
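A sketch of calling the new server-streaming StreamInstanceHistory RPC through the regenerated stub, assuming a sidecar such as durabletask-go listening on localhost:4001 and an existing instance ID:

import grpc

from durabletask.internal import orchestrator_service_pb2 as pb
from durabletask.internal import orchestrator_service_pb2_grpc as pb_grpc

with grpc.insecure_channel("localhost:4001") as channel:
    stub = pb_grpc.TaskHubSidecarServiceStub(channel)
    request = pb.StreamInstanceHistoryRequest(instanceId="abc123")
    for chunk in stub.StreamInstanceHistory(request):  # yields HistoryChunk messages
        for event in chunk.events:
            print(event.eventId, event.timestamp)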
self.CleanEntityStorage = channel.unary_unary( '/TaskHubSidecarService/CleanEntityStorage', - request_serializer=orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, - response_deserializer=orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + _registered_method=True) + self.AbandonTaskActivityWorkItem = channel.unary_unary( + '/TaskHubSidecarService/AbandonTaskActivityWorkItem', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskResponse.FromString, + _registered_method=True) + self.AbandonTaskOrchestratorWorkItem = channel.unary_unary( + '/TaskHubSidecarService/AbandonTaskOrchestratorWorkItem', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskResponse.FromString, + _registered_method=True) + self.AbandonTaskEntityWorkItem = channel.unary_unary( + '/TaskHubSidecarService/AbandonTaskEntityWorkItem', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskResponse.FromString, + _registered_method=True) + self.RerunWorkflowFromEvent = channel.unary_unary( + '/TaskHubSidecarService/RerunWorkflowFromEvent', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventResponse.FromString, + _registered_method=True) + self.ListInstanceIDs = channel.unary_unary( + '/TaskHubSidecarService/ListInstanceIDs', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsResponse.FromString, + _registered_method=True) + self.GetInstanceHistory = channel.unary_unary( + '/TaskHubSidecarService/GetInstanceHistory', + request_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryRequest.SerializeToString, + response_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryResponse.FromString, _registered_method=True) @@ -260,6 +293,13 @@ def CompleteEntityTask(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def StreamInstanceHistory(self, request, context): + """Gets the history of an orchestration instance as a stream of events. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def CreateTaskHub(self, request, context): """Deletes and Creates the necessary resources for the orchestration service and the instance store """ @@ -302,6 +342,46 @@ def CleanEntityStorage(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def AbandonTaskActivityWorkItem(self, request, context): + """Abandons a single work item + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AbandonTaskOrchestratorWorkItem(self, request, context): + """Abandon an orchestration work item + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AbandonTaskEntityWorkItem(self, request, context): + """Abandon an entity work item + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RerunWorkflowFromEvent(self, request, context): + """Rerun a Workflow from a specific event ID of a workflow instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListInstanceIDs(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetInstanceHistory(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_TaskHubSidecarServiceServicer_to_server(servicer, server): rpc_method_handlers = { @@ -312,108 +392,143 @@ def add_TaskHubSidecarServiceServicer_to_server(servicer, server): ), 'StartInstance': grpc.unary_unary_rpc_method_handler( servicer.StartInstance, - request_deserializer=orchestrator__service__pb2.CreateInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.CreateInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.SerializeToString, ), 'GetInstance': grpc.unary_unary_rpc_method_handler( servicer.GetInstance, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'RewindInstance': grpc.unary_unary_rpc_method_handler( servicer.RewindInstance, - request_deserializer=orchestrator__service__pb2.RewindInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.RewindInstanceResponse.SerializeToString, + 
request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.SerializeToString, ), 'WaitForInstanceStart': grpc.unary_unary_rpc_method_handler( servicer.WaitForInstanceStart, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'WaitForInstanceCompletion': grpc.unary_unary_rpc_method_handler( servicer.WaitForInstanceCompletion, - request_deserializer=orchestrator__service__pb2.GetInstanceRequest.FromString, - response_serializer=orchestrator__service__pb2.GetInstanceResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.SerializeToString, ), 'RaiseEvent': grpc.unary_unary_rpc_method_handler( servicer.RaiseEvent, - request_deserializer=orchestrator__service__pb2.RaiseEventRequest.FromString, - response_serializer=orchestrator__service__pb2.RaiseEventResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.SerializeToString, ), 'TerminateInstance': grpc.unary_unary_rpc_method_handler( servicer.TerminateInstance, - request_deserializer=orchestrator__service__pb2.TerminateRequest.FromString, - response_serializer=orchestrator__service__pb2.TerminateResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.SerializeToString, ), 'SuspendInstance': grpc.unary_unary_rpc_method_handler( servicer.SuspendInstance, - request_deserializer=orchestrator__service__pb2.SuspendRequest.FromString, - response_serializer=orchestrator__service__pb2.SuspendResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.SerializeToString, ), 'ResumeInstance': grpc.unary_unary_rpc_method_handler( servicer.ResumeInstance, - request_deserializer=orchestrator__service__pb2.ResumeRequest.FromString, - response_serializer=orchestrator__service__pb2.ResumeResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.SerializeToString, ), 'QueryInstances': grpc.unary_unary_rpc_method_handler( servicer.QueryInstances, - request_deserializer=orchestrator__service__pb2.QueryInstancesRequest.FromString, - response_serializer=orchestrator__service__pb2.QueryInstancesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.FromString, + 
response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.SerializeToString, ), 'PurgeInstances': grpc.unary_unary_rpc_method_handler( servicer.PurgeInstances, - request_deserializer=orchestrator__service__pb2.PurgeInstancesRequest.FromString, - response_serializer=orchestrator__service__pb2.PurgeInstancesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.SerializeToString, ), 'GetWorkItems': grpc.unary_stream_rpc_method_handler( servicer.GetWorkItems, - request_deserializer=orchestrator__service__pb2.GetWorkItemsRequest.FromString, - response_serializer=orchestrator__service__pb2.WorkItem.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.SerializeToString, ), 'CompleteActivityTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteActivityTask, - request_deserializer=orchestrator__service__pb2.ActivityResponse.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CompleteOrchestratorTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteOrchestratorTask, - request_deserializer=orchestrator__service__pb2.OrchestratorResponse.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, ), 'CompleteEntityTask': grpc.unary_unary_rpc_method_handler( servicer.CompleteEntityTask, - request_deserializer=orchestrator__service__pb2.EntityBatchResult.FromString, - response_serializer=orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.SerializeToString, + ), + 'StreamInstanceHistory': grpc.unary_stream_rpc_method_handler( + servicer.StreamInstanceHistory, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.StreamInstanceHistoryRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.HistoryChunk.SerializeToString, ), 'CreateTaskHub': grpc.unary_unary_rpc_method_handler( servicer.CreateTaskHub, - request_deserializer=orchestrator__service__pb2.CreateTaskHubRequest.FromString, - response_serializer=orchestrator__service__pb2.CreateTaskHubResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.SerializeToString, ), 'DeleteTaskHub': grpc.unary_unary_rpc_method_handler( servicer.DeleteTaskHub, - request_deserializer=orchestrator__service__pb2.DeleteTaskHubRequest.FromString, - 
response_serializer=orchestrator__service__pb2.DeleteTaskHubResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.SerializeToString, ), 'SignalEntity': grpc.unary_unary_rpc_method_handler( servicer.SignalEntity, - request_deserializer=orchestrator__service__pb2.SignalEntityRequest.FromString, - response_serializer=orchestrator__service__pb2.SignalEntityResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.SerializeToString, ), 'GetEntity': grpc.unary_unary_rpc_method_handler( servicer.GetEntity, - request_deserializer=orchestrator__service__pb2.GetEntityRequest.FromString, - response_serializer=orchestrator__service__pb2.GetEntityResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.SerializeToString, ), 'QueryEntities': grpc.unary_unary_rpc_method_handler( servicer.QueryEntities, - request_deserializer=orchestrator__service__pb2.QueryEntitiesRequest.FromString, - response_serializer=orchestrator__service__pb2.QueryEntitiesResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.SerializeToString, ), 'CleanEntityStorage': grpc.unary_unary_rpc_method_handler( servicer.CleanEntityStorage, - request_deserializer=orchestrator__service__pb2.CleanEntityStorageRequest.FromString, - response_serializer=orchestrator__service__pb2.CleanEntityStorageResponse.SerializeToString, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.SerializeToString, + ), + 'AbandonTaskActivityWorkItem': grpc.unary_unary_rpc_method_handler( + servicer.AbandonTaskActivityWorkItem, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskResponse.SerializeToString, + ), + 'AbandonTaskOrchestratorWorkItem': grpc.unary_unary_rpc_method_handler( + servicer.AbandonTaskOrchestratorWorkItem, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskResponse.SerializeToString, + ), + 'AbandonTaskEntityWorkItem': grpc.unary_unary_rpc_method_handler( + servicer.AbandonTaskEntityWorkItem, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskResponse.SerializeToString, + ), + 'RerunWorkflowFromEvent': grpc.unary_unary_rpc_method_handler( + servicer.RerunWorkflowFromEvent, + 
request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventResponse.SerializeToString, + ), + 'ListInstanceIDs': grpc.unary_unary_rpc_method_handler( + servicer.ListInstanceIDs, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsResponse.SerializeToString, + ), + 'GetInstanceHistory': grpc.unary_unary_rpc_method_handler( + servicer.GetInstanceHistory, + request_deserializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryRequest.FromString, + response_serializer=durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( @@ -468,8 +583,8 @@ def StartInstance(request, request, target, '/TaskHubSidecarService/StartInstance', - orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, - orchestrator__service__pb2.CreateInstanceResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateInstanceResponse.FromString, options, channel_credentials, insecure, @@ -495,8 +610,8 @@ def GetInstance(request, request, target, '/TaskHubSidecarService/GetInstance', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, options, channel_credentials, insecure, @@ -522,8 +637,8 @@ def RewindInstance(request, request, target, '/TaskHubSidecarService/RewindInstance', - orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, - orchestrator__service__pb2.RewindInstanceResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RewindInstanceResponse.FromString, options, channel_credentials, insecure, @@ -549,8 +664,8 @@ def WaitForInstanceStart(request, request, target, '/TaskHubSidecarService/WaitForInstanceStart', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, options, channel_credentials, insecure, @@ -576,8 +691,8 @@ def WaitForInstanceCompletion(request, request, target, '/TaskHubSidecarService/WaitForInstanceCompletion', - orchestrator__service__pb2.GetInstanceRequest.SerializeToString, - orchestrator__service__pb2.GetInstanceResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceResponse.FromString, options, channel_credentials, insecure, @@ -603,8 +718,8 @@ def RaiseEvent(request, request, target, '/TaskHubSidecarService/RaiseEvent', - orchestrator__service__pb2.RaiseEventRequest.SerializeToString, - orchestrator__service__pb2.RaiseEventResponse.FromString, + 
durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.RaiseEventResponse.FromString, options, channel_credentials, insecure, @@ -630,8 +745,8 @@ def TerminateInstance(request, request, target, '/TaskHubSidecarService/TerminateInstance', - orchestrator__service__pb2.TerminateRequest.SerializeToString, - orchestrator__service__pb2.TerminateResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.TerminateResponse.FromString, options, channel_credentials, insecure, @@ -657,8 +772,8 @@ def SuspendInstance(request, request, target, '/TaskHubSidecarService/SuspendInstance', - orchestrator__service__pb2.SuspendRequest.SerializeToString, - orchestrator__service__pb2.SuspendResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.SuspendResponse.FromString, options, channel_credentials, insecure, @@ -684,8 +799,8 @@ def ResumeInstance(request, request, target, '/TaskHubSidecarService/ResumeInstance', - orchestrator__service__pb2.ResumeRequest.SerializeToString, - orchestrator__service__pb2.ResumeResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.ResumeResponse.FromString, options, channel_credentials, insecure, @@ -711,8 +826,8 @@ def QueryInstances(request, request, target, '/TaskHubSidecarService/QueryInstances', - orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, - orchestrator__service__pb2.QueryInstancesResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryInstancesResponse.FromString, options, channel_credentials, insecure, @@ -738,8 +853,8 @@ def PurgeInstances(request, request, target, '/TaskHubSidecarService/PurgeInstances', - orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, - orchestrator__service__pb2.PurgeInstancesResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.PurgeInstancesResponse.FromString, options, channel_credentials, insecure, @@ -765,8 +880,8 @@ def GetWorkItems(request, request, target, '/TaskHubSidecarService/GetWorkItems', - orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, - orchestrator__service__pb2.WorkItem.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetWorkItemsRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.WorkItem.FromString, options, channel_credentials, insecure, @@ -792,8 +907,8 @@ def CompleteActivityTask(request, request, target, '/TaskHubSidecarService/CompleteActivityTask', - orchestrator__service__pb2.ActivityResponse.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.ActivityResponse.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, options, channel_credentials, insecure, @@ -819,8 +934,8 @@ def CompleteOrchestratorTask(request, request, target, '/TaskHubSidecarService/CompleteOrchestratorTask', - 
orchestrator__service__pb2.OrchestratorResponse.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.OrchestratorResponse.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, options, channel_credentials, insecure, @@ -846,8 +961,35 @@ def CompleteEntityTask(request, request, target, '/TaskHubSidecarService/CompleteEntityTask', - orchestrator__service__pb2.EntityBatchResult.SerializeToString, - orchestrator__service__pb2.CompleteTaskResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.EntityBatchResult.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CompleteTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def StreamInstanceHistory(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/TaskHubSidecarService/StreamInstanceHistory', + durabletask_dot_internal_dot_orchestrator__service__pb2.StreamInstanceHistoryRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.HistoryChunk.FromString, options, channel_credentials, insecure, @@ -873,8 +1015,8 @@ def CreateTaskHub(request, request, target, '/TaskHubSidecarService/CreateTaskHub', - orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, - orchestrator__service__pb2.CreateTaskHubResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CreateTaskHubResponse.FromString, options, channel_credentials, insecure, @@ -900,8 +1042,8 @@ def DeleteTaskHub(request, request, target, '/TaskHubSidecarService/DeleteTaskHub', - orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, - orchestrator__service__pb2.DeleteTaskHubResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.DeleteTaskHubResponse.FromString, options, channel_credentials, insecure, @@ -927,8 +1069,8 @@ def SignalEntity(request, request, target, '/TaskHubSidecarService/SignalEntity', - orchestrator__service__pb2.SignalEntityRequest.SerializeToString, - orchestrator__service__pb2.SignalEntityResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.SignalEntityResponse.FromString, options, channel_credentials, insecure, @@ -954,8 +1096,8 @@ def GetEntity(request, request, target, '/TaskHubSidecarService/GetEntity', - orchestrator__service__pb2.GetEntityRequest.SerializeToString, - orchestrator__service__pb2.GetEntityResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetEntityResponse.FromString, options, channel_credentials, insecure, @@ -981,8 +1123,8 @@ def QueryEntities(request, request, target, '/TaskHubSidecarService/QueryEntities', - orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, - 
orchestrator__service__pb2.QueryEntitiesResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.QueryEntitiesResponse.FromString, options, channel_credentials, insecure, @@ -1008,8 +1150,170 @@ def CleanEntityStorage(request, request, target, '/TaskHubSidecarService/CleanEntityStorage', - orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, - orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.CleanEntityStorageResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AbandonTaskActivityWorkItem(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/AbandonTaskActivityWorkItem', + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonActivityTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AbandonTaskOrchestratorWorkItem(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/AbandonTaskOrchestratorWorkItem', + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonOrchestrationTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AbandonTaskEntityWorkItem(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/AbandonTaskEntityWorkItem', + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.AbandonEntityTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def RerunWorkflowFromEvent(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/RerunWorkflowFromEvent', + durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventRequest.SerializeToString, + 
durabletask_dot_internal_dot_orchestrator__service__pb2.RerunWorkflowFromEventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ListInstanceIDs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/ListInstanceIDs', + durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.ListInstanceIDsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetInstanceHistory(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/TaskHubSidecarService/GetInstanceHistory', + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryRequest.SerializeToString, + durabletask_dot_internal_dot_orchestrator__service__pb2.GetInstanceHistoryResponse.FromString, options, channel_credentials, insecure, diff --git a/durabletask/internal/shared.py b/durabletask/internal/shared.py index 80c3d568..09645ede 100644 --- a/durabletask/internal/shared.py +++ b/durabletask/internal/shared.py @@ -4,53 +4,112 @@ import dataclasses import json import logging +import os from types import SimpleNamespace -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Optional, Sequence, Union import grpc -from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl +ClientInterceptor = Union[ + grpc.UnaryUnaryClientInterceptor, + grpc.UnaryStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, + grpc.StreamStreamClientInterceptor, +] # Field name used to indicate that an object was automatically serialized # and should be deserialized as a SimpleNamespace AUTO_SERIALIZED = "__durabletask_autoobject__" +SECURE_PROTOCOLS = ["https://", "grpcs://"] +INSECURE_PROTOCOLS = ["http://", "grpc://"] + def get_default_host_address() -> str: + """Resolve the default Durable Task sidecar address. + + Honors environment variables if present; otherwise defaults to localhost:4001. 
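For example (values here are illustrative, not defaults):

        os.environ["DAPR_GRPC_ENDPOINT"] = "grpcs://myhost:443"  # illustrative value; full-endpoint override wins
        get_default_host_address()  # -> "grpcs://myhost:443"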
+ + Supported environment variables (checked in order): + - DAPR_GRPC_ENDPOINT (e.g., "localhost:4001", "grpcs://host:443") + - DAPR_GRPC_HOST/DAPR_RUNTIME_HOST and DAPR_GRPC_PORT + """ + + # Full endpoint overrides + endpoint = os.environ.get("DAPR_GRPC_ENDPOINT") + if endpoint: + return endpoint + + # Host/port split overrides + host = os.environ.get("DAPR_GRPC_HOST") or os.environ.get("DAPR_RUNTIME_HOST") + if host: + port = os.environ.get("DAPR_GRPC_PORT", "4001") + return f"{host}:{port}" + + # Default to durabletask-go default port return "localhost:4001" -def get_grpc_channel(host_address: Union[str, None], metadata: Union[List[Tuple[str, str]], None], secure_channel: bool = False) -> grpc.Channel: +def get_grpc_channel( + host_address: Optional[str], + secure_channel: bool = False, + interceptors: Optional[Sequence[ClientInterceptor]] = None, + options: Optional[Sequence[tuple[str, Any]]] = None, +) -> grpc.Channel: + """Create a gRPC channel. + + Args: + host_address: The host address of the gRPC server. If None, uses the default address (as defined in get_default_host_address above). + secure_channel: Whether to use a secure channel (TLS/SSL). Defaults to False. + interceptors: Optional sequence of client interceptors to apply to the channel. + options: Optional sequence of gRPC channel options as (key, value) tuples. Keys defined in https://grpc.github.io/grpc/core/group__grpc__arg__keys.html + """ if host_address is None: host_address = get_default_host_address() + for protocol in SECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = True + # remove the protocol from the host name + host_address = host_address[len(protocol) :] + break + + for protocol in INSECURE_PROTOCOLS: + if host_address.lower().startswith(protocol): + secure_channel = False + # remove the protocol from the host name + host_address = host_address[len(protocol) :] + break + if secure_channel: - channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) + channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials(), options=options) else: - channel = grpc.insecure_channel(host_address) + channel = grpc.insecure_channel(host_address, options=options) - if metadata is not None and len(metadata) > 0: - interceptors = [DefaultClientInterceptorImpl(metadata)] + # Apply interceptors ONLY if they exist + if interceptors: channel = grpc.intercept_channel(channel, *interceptors) return channel + def get_logger( - name_suffix: str, - log_handler: Union[logging.Handler, None] = None, - log_formatter: Union[logging.Formatter, None] = None) -> logging.Logger: - logger = logging.Logger(f"durabletask-{name_suffix}") + name_suffix: str, + log_handler: Optional[logging.Handler] = None, + log_formatter: Optional[logging.Formatter] = None, +) -> logging.Logger: + logger = logging.getLogger(f"durabletask-{name_suffix}") # Add a default log handler if none is provided if log_handler is None: log_handler = logging.StreamHandler() - log_handler.setLevel(logging.INFO) logger.handlers.append(log_handler) # Set a default log formatter to our handler if none is provided if log_formatter is None: log_formatter = logging.Formatter( fmt="%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s", - datefmt='%Y-%m-%d %H:%M:%S') + datefmt="%Y-%m-%d %H:%M:%S", + ) log_handler.setFormatter(log_formatter) return logger @@ -78,7 +137,7 @@ def default(self, obj): if dataclasses.is_dataclass(obj): # Dataclasses are not serializable by default, so we convert them to a dict and mark
them for # automatic deserialization by the receiver - d = dataclasses.asdict(obj) + d = dataclasses.asdict(obj) # type: ignore d[AUTO_SERIALIZED] = True return d elif isinstance(obj, SimpleNamespace): @@ -94,7 +153,7 @@ class InternalJSONDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): super().__init__(object_hook=self.dict_to_object, *args, **kwargs) - def dict_to_object(self, d: Dict[str, Any]): + def dict_to_object(self, d: dict[str, Any]): # If the object was serialized by the InternalJSONEncoder, deserialize it as a SimpleNamespace if d.pop(AUTO_SERIALIZED, False): return SimpleNamespace(**d) diff --git a/durabletask/task.py b/durabletask/task.py index a9f85de2..3eaf9a2f 100644 --- a/durabletask/task.py +++ b/durabletask/task.py @@ -7,19 +7,17 @@ import math from abc import ABC, abstractmethod from datetime import datetime, timedelta -from typing import (Any, Callable, Generator, Generic, List, Optional, TypeVar, - Union) +from typing import Any, Callable, Generator, Generic, Optional, TypeVar, Union import durabletask.internal.helpers as pbh import durabletask.internal.orchestrator_service_pb2 as pb -T = TypeVar('T') -TInput = TypeVar('TInput') -TOutput = TypeVar('TOutput') +T = TypeVar("T") +TInput = TypeVar("TInput") +TOutput = TypeVar("TOutput") class OrchestrationContext(ABC): - @property @abstractmethod def instance_id(self) -> str: @@ -72,8 +70,13 @@ def is_replaying(self) -> bool: pass @abstractmethod - def set_custom_status(self, custom_status: str) -> None: - """Set the custom status. + def set_custom_status(self, custom_status: Any) -> None: + """Set the orchestration instance's custom status. + + Parameters + ---------- + custom_status: Any + A JSON-serializable custom status value to set. """ pass @@ -94,9 +97,14 @@ def create_timer(self, fire_at: Union[datetime, timedelta]) -> Task: pass @abstractmethod - def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *, - input: Optional[TInput] = None, - retry_policy: Optional[RetryPolicy] = None) -> Task[TOutput]: + def call_activity( + self, + activity: Union[Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[RetryPolicy] = None, + app_id: Optional[str] = None, + ) -> Task[TOutput]: """Schedule an activity for execution. Parameters @@ -107,6 +115,8 @@ def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *, The JSON-serializable input (or None) to pass to the activity. retry_policy: Optional[RetryPolicy] The retry policy to use for this activity call. + app_id: Optional[str] + The app ID that will execute the activity. If not specified, the activity will be executed by the same app as the orchestrator. Returns ------- @@ -116,10 +126,15 @@ def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *, pass @abstractmethod - def call_sub_orchestrator(self, orchestrator: Orchestrator[TInput, TOutput], *, - input: Optional[TInput] = None, - instance_id: Optional[str] = None, - retry_policy: Optional[RetryPolicy] = None) -> Task[TOutput]: + def call_sub_orchestrator( + self, + orchestrator: Orchestrator[TInput, TOutput], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + retry_policy: Optional[RetryPolicy] = None, + app_id: Optional[str] = None, + ) -> Task[TOutput]: """Schedule sub-orchestrator function for execution. Parameters @@ -133,6 +148,8 @@ def call_sub_orchestrator(self, orchestrator: Orchestrator[TInput, TOutput], *, random UUID will be used. 
retry_policy: Optional[RetryPolicy] The retry policy to use for this sub-orchestrator call. + app_id: Optional[str] + The app ID that will execute the sub-orchestrator. If not specified, the sub-orchestrator will be executed by the same app as the orchestrator. Returns ------- @@ -172,6 +189,22 @@ def continue_as_new(self, new_input: Any, *, save_events: bool = False) -> None: """ pass + @abstractmethod + def is_patched(self, patch_name: str) -> bool: + """Check if the given patch name can be applied to the orchestration. + + Parameters + ---------- + patch_name : str + The name of the patch to check. + + Returns + ------- + bool + True if the given patch name can be applied to the orchestration, False otherwise. + """ + pass + class FailureDetails: def __init__(self, message: str, error_type: str, stack_trace: Optional[str]): @@ -200,7 +233,8 @@ def __init__(self, message: str, details: pb.TaskFailureDetails): self._details = FailureDetails( details.errorMessage, details.errorType, - details.stackTrace.value if not pbh.is_empty(details.stackTrace) else None) + details.stackTrace.value if not pbh.is_empty(details.stackTrace) else None, + ) @property def details(self) -> FailureDetails: @@ -215,8 +249,32 @@ class OrchestrationStateError(Exception): pass +class NonRetryableError(Exception): + """Exception indicating the operation should not be retried. + + If an activity or sub-orchestration raises this exception, retry logic will be + bypassed and the failure will be returned immediately to the orchestrator. + """ + + pass + + +def is_error_non_retryable(error_type: str, policy: RetryPolicy) -> bool: + """Checks whether an error type is non-retryable.""" + is_non_retryable = False + if error_type == "NonRetryableError": + is_non_retryable = True + elif ( + policy.non_retryable_error_types is not None + and error_type in policy.non_retryable_error_types + ): + is_non_retryable = True + return is_non_retryable + + class Task(ABC, Generic[T]): """Abstract base class for asynchronous tasks in a durable orchestration.""" + _result: T _exception: Optional[TaskFailedError] _parent: Optional[CompositeTask[T]] @@ -240,7 +298,7 @@ def is_failed(self) -> bool: def get_result(self) -> T: """Returns the result of the task.""" if not self._is_complete: - raise ValueError('The task has not completed.') + raise ValueError("The task has not completed.") elif self._exception is not None: raise self._exception return self._result @@ -248,15 +306,16 @@ def get_result(self) -> T: def get_exception(self) -> TaskFailedError: """Returns the exception that caused the task to fail.""" if self._exception is None: - raise ValueError('The task has not failed.') + raise ValueError("The task has not failed.") return self._exception class CompositeTask(Task[T]): """A task that is composed of other tasks.""" - _tasks: List[Task] - def __init__(self, tasks: List[Task]): + _tasks: list[Task] + + def __init__(self, tasks: list[Task]): super().__init__() self._tasks = tasks self._completed_tasks = 0 @@ -266,20 +325,25 @@ def __init__(self, tasks: List[Task]): if task.is_complete: self.on_child_completed(task) - def get_tasks(self) -> List[Task]: + def get_tasks(self) -> list[Task]: return self._tasks @abstractmethod def on_child_completed(self, task: Task[T]): pass -class WhenAllTask(CompositeTask[List[T]]): + +class WhenAllTask(CompositeTask[list[T]]): """A task that completes when all of its child tasks complete.""" - def __init__(self, tasks: List[Task[T]]): + def __init__(self, tasks: list[Task[T]]): 
super().__init__(tasks) self._completed_tasks = 0 self._failed_tasks = 0 + # If there are no child tasks, this composite should complete immediately + if len(self._tasks) == 0: + self._result = [] # type: ignore[assignment] + self._is_complete = True @property def pending_tasks(self) -> int: @@ -288,7 +352,7 @@ def pending_tasks(self) -> int: def on_child_completed(self, task: Task[T]): if self.is_complete: - raise ValueError('The task has already completed.') + raise ValueError("The task has already completed.") self._completed_tasks += 1 if task.is_failed and self._exception is None: self._exception = task.get_exception() @@ -297,20 +361,21 @@ def on_child_completed(self, task: Task[T]): # The order of the result MUST match the order of the tasks provided to the constructor. self._result = [task.get_result() for task in self._tasks] self._is_complete = True + if self._parent is not None: + self._parent.on_child_completed(self) def get_completed_tasks(self) -> int: return self._completed_tasks class CompletableTask(Task[T]): - def __init__(self): super().__init__() self._retryable_parent = None def complete(self, result: T): if self._is_complete: - raise ValueError('The task has already completed.') + raise ValueError("The task has already completed.") self._result = result self._is_complete = True if self._parent is not None: @@ -318,7 +383,7 @@ def complete(self, result: T): def fail(self, message: str, details: pb.TaskFailureDetails): if self._is_complete: - raise ValueError('The task has already completed.') + raise ValueError("The task has already completed.") self._exception = TaskFailedError(message, details) self._is_complete = True if self._parent is not None: @@ -328,8 +393,13 @@ def fail(self, message: str, details: pb.TaskFailureDetails): class RetryableTask(CompletableTask[T]): """A task that can be retried according to a retry policy.""" - def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, - start_time:datetime, is_sub_orch: bool) -> None: + def __init__( + self, + retry_policy: RetryPolicy, + action: pb.OrchestratorAction, + start_time: datetime, + is_sub_orch: bool, + ) -> None: super().__init__() self._action = action self._retry_policy = retry_policy @@ -339,32 +409,39 @@ def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, def increment_attempt_count(self) -> None: self._attempt_count += 1 - - def compute_next_delay(self) -> Union[timedelta, None]: + + def compute_next_delay(self) -> Optional[timedelta]: if self._attempt_count >= self._retry_policy.max_number_of_attempts: return None retry_expiration: datetime = datetime.max - if self._retry_policy.retry_timeout is not None and self._retry_policy.retry_timeout != datetime.max: + if ( + self._retry_policy.retry_timeout is not None + and self._retry_policy.retry_timeout != datetime.max + ): retry_expiration = self._start_time + self._retry_policy.retry_timeout - + if self._retry_policy.backoff_coefficient is None: backoff_coefficient = 1.0 else: backoff_coefficient = self._retry_policy.backoff_coefficient if datetime.utcnow() < retry_expiration: - next_delay_f = math.pow(backoff_coefficient, self._attempt_count - 1) * self._retry_policy.first_retry_interval.total_seconds() + next_delay_f = ( + math.pow(backoff_coefficient, self._attempt_count - 1) + * self._retry_policy.first_retry_interval.total_seconds() + ) if self._retry_policy.max_retry_interval is not None: - next_delay_f = min(next_delay_f, self._retry_policy.max_retry_interval.total_seconds()) - return 
timedelta(seconds=next_delay_f) + next_delay_f = min( + next_delay_f, self._retry_policy.max_retry_interval.total_seconds() + ) + return timedelta(seconds=next_delay_f) return None class TimerTask(CompletableTask[T]): - def __init__(self) -> None: super().__init__() @@ -375,22 +452,28 @@ def set_retryable_parent(self, retryable_task: RetryableTask): class WhenAnyTask(CompositeTask[Task]): """A task that completes when any of its child tasks complete.""" - def __init__(self, tasks: List[Task]): + def __init__(self, tasks: list[Task]): super().__init__(tasks) + # If there are no child tasks, complete immediately with an empty result + if len(self._tasks) == 0: + self._result = [] # type: ignore[assignment] + self._is_complete = True def on_child_completed(self, task: Task): # The first task to complete is the result of the WhenAnyTask. if not self.is_complete: self._is_complete = True self._result = task + if self._parent is not None: + self._parent.on_child_completed(self) -def when_all(tasks: List[Task[T]]) -> WhenAllTask[T]: +def when_all(tasks: list[Task[T]]) -> WhenAllTask[T]: """Returns a task that completes when all of the provided tasks complete or when one of the tasks fail.""" return WhenAllTask(tasks) -def when_any(tasks: List[Task]) -> WhenAnyTask: +def when_any(tasks: list[Task]) -> WhenAnyTask: """Returns a task that completes when any of the provided tasks complete or fail.""" return WhenAnyTask(tasks) @@ -438,12 +521,16 @@ def task_id(self) -> int: class RetryPolicy: """Represents the retry policy for an orchestration or activity function.""" - def __init__(self, *, - first_retry_interval: timedelta, - max_number_of_attempts: int, - backoff_coefficient: Optional[float] = 1.0, - max_retry_interval: Optional[timedelta] = None, - retry_timeout: Optional[timedelta] = None): + def __init__( + self, + *, + first_retry_interval: timedelta, + max_number_of_attempts: int, + backoff_coefficient: Optional[float] = 1.0, + max_retry_interval: Optional[timedelta] = None, + retry_timeout: Optional[timedelta] = None, + non_retryable_error_types: Optional[list[Union[str, type]]] = None, + ): """Creates a new RetryPolicy instance. Parameters @@ -458,24 +545,39 @@ def __init__(self, *, The maximum retry interval to use for any retry attempt. retry_timeout : Optional[timedelta] The maximum amount of time to spend retrying the operation. + non_retryable_error_types : Optional[list[Union[str, type]]] + A list of exception type names or classes that should not be retried. + If a failure's error type matches any of these, the task fails immediately. + The built-in NonRetryableError is always treated as non-retryable regardless + of this setting. 
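        Example
        -------
        A minimal sketch (the error type names are illustrative; any exception
        class or class name from your application can be listed):

            policy = RetryPolicy(
                first_retry_interval=timedelta(seconds=1),
                max_number_of_attempts=5,
                backoff_coefficient=2.0,
                non_retryable_error_types=["QuotaExceededError", ValueError],  # illustrative entries
            )

        With this policy, a failure whose errorType is "QuotaExceededError" or
        "ValueError" fails immediately, while other failures are retried with
        delays of 1s, 2s, 4s, ... until five total attempts have been made.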
""" # validate inputs if first_retry_interval < timedelta(seconds=0): - raise ValueError('first_retry_interval must be >= 0') + raise ValueError("first_retry_interval must be >= 0") if max_number_of_attempts < 1: - raise ValueError('max_number_of_attempts must be >= 1') + raise ValueError("max_number_of_attempts must be >= 1") if backoff_coefficient is not None and backoff_coefficient < 1: - raise ValueError('backoff_coefficient must be >= 1') + raise ValueError("backoff_coefficient must be >= 1") if max_retry_interval is not None and max_retry_interval < timedelta(seconds=0): - raise ValueError('max_retry_interval must be >= 0') + raise ValueError("max_retry_interval must be >= 0") if retry_timeout is not None and retry_timeout < timedelta(seconds=0): - raise ValueError('retry_timeout must be >= 0') + raise ValueError("retry_timeout must be >= 0") self._first_retry_interval = first_retry_interval self._max_number_of_attempts = max_number_of_attempts self._backoff_coefficient = backoff_coefficient self._max_retry_interval = max_retry_interval self._retry_timeout = retry_timeout + # Normalize non-retryable error type names to a set of strings + names: Optional[set[str]] = None + if non_retryable_error_types: + names = set[str]() + for t in non_retryable_error_types: + if isinstance(t, str) and t: + names.add(t) + elif isinstance(t, type): + names.add(t.__name__) + self._non_retryable_error_types = names @property def first_retry_interval(self) -> timedelta: @@ -502,11 +604,22 @@ def retry_timeout(self) -> Optional[timedelta]: """The maximum amount of time to spend retrying the operation.""" return self._retry_timeout + @property + def non_retryable_error_types(self) -> Optional[set[str]]: + """Set of error type names that should not be retried. + + Comparison is performed against the errorType string provided by the + backend (typically the exception class name). + """ + return self._non_retryable_error_types + def get_name(fn: Callable) -> str: """Returns the name of the provided function""" name = fn.__name__ - if name == '': - raise ValueError('Cannot infer a name from a lambda function. Please provide a name explicitly.') + if name == "": + raise ValueError( + "Cannot infer a name from a lambda function. Please provide a name explicitly." + ) return name diff --git a/durabletask/worker.py b/durabletask/worker.py index bcc1a30e..2d87d6e7 100644 --- a/durabletask/worker.py +++ b/durabletask/worker.py @@ -1,59 +1,151 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-import concurrent.futures +import asyncio +import inspect import logging +import os +import random +import threading +from concurrent.futures import ThreadPoolExecutor from datetime import datetime, timedelta from threading import Event, Thread from types import GeneratorType -from typing import (Any, Dict, Generator, List, Optional, Sequence, Tuple, - TypeVar, Union) +from typing import Any, Generator, Optional, Sequence, TypeVar, Union import grpc -from google.protobuf import empty_pb2, wrappers_pb2 +from google.protobuf import empty_pb2 import durabletask.internal.helpers as ph -import durabletask.internal.helpers as pbh import durabletask.internal.orchestrator_service_pb2 as pb import durabletask.internal.orchestrator_service_pb2_grpc as stubs import durabletask.internal.shared as shared -from durabletask import task +from durabletask import deterministic, task +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl -TInput = TypeVar('TInput') -TOutput = TypeVar('TOutput') +TInput = TypeVar("TInput") +TOutput = TypeVar("TOutput") +class VersionNotRegisteredException(Exception): + pass -class _Registry: +def _log_all_threads(logger: logging.Logger, context: str = ""): + """Helper function to log all currently active threads for debugging.""" + active_threads = threading.enumerate() + thread_info = [] + for t in active_threads: + thread_info.append( + f"name='{t.name}', id={t.ident}, daemon={t.daemon}, alive={t.is_alive()}" + ) + logger.debug( + f"[THREAD_TRACE] {context} Active threads ({len(active_threads)}): {', '.join(thread_info)}" + ) + + +class ConcurrencyOptions: + """Configuration options for controlling concurrency of different work item types and the thread pool size. + + This class provides fine-grained control over concurrent processing limits for + activities, orchestrations and the thread pool size. + """ + + def __init__( + self, + maximum_concurrent_activity_work_items: Optional[int] = None, + maximum_concurrent_orchestration_work_items: Optional[int] = None, + maximum_thread_pool_workers: Optional[int] = None, + ): + """Initialize concurrency options. + + Args: + maximum_concurrent_activity_work_items: Maximum number of activity work items + that can be processed concurrently. Defaults to 100 * processor_count. + maximum_concurrent_orchestration_work_items: Maximum number of orchestration work items + that can be processed concurrently. Defaults to 100 * processor_count. + maximum_thread_pool_workers: Maximum number of thread pool workers to use. 
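        Example (a sketch; the limits shown are arbitrary):

            options = ConcurrencyOptions(
                maximum_concurrent_activity_work_items=50,
                maximum_concurrent_orchestration_work_items=20,
                maximum_thread_pool_workers=8,
            )
            worker = TaskHubGrpcWorker(concurrency_options=options)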
+ """ + processor_count = os.cpu_count() or 1 + default_concurrency = 100 * processor_count + # see https://docs.python.org/3/library/concurrent.futures.html + default_max_workers = processor_count + 4 + + self.maximum_concurrent_activity_work_items = ( + maximum_concurrent_activity_work_items + if maximum_concurrent_activity_work_items is not None + else default_concurrency + ) + + self.maximum_concurrent_orchestration_work_items = ( + maximum_concurrent_orchestration_work_items + if maximum_concurrent_orchestration_work_items is not None + else default_concurrency + ) + + self.maximum_thread_pool_workers = ( + maximum_thread_pool_workers + if maximum_thread_pool_workers is not None + else default_max_workers + ) - orchestrators: Dict[str, task.Orchestrator] - activities: Dict[str, task.Activity] + +class _Registry: + orchestrators: dict[str, task.Orchestrator] + versioned_orchestrators: dict[str, dict[str, task.Orchestrator]] + latest_versioned_orchestrators_version_name: dict[str, str] + activities: dict[str, task.Activity] def __init__(self): self.orchestrators = {} + self.versioned_orchestrators = {} + self.latest_versioned_orchestrators_version_name = {} self.activities = {} - def add_orchestrator(self, fn: task.Orchestrator) -> str: + def add_orchestrator(self, fn: task.Orchestrator, version_name: Optional[str] = None, is_latest: bool = False) -> str: if fn is None: - raise ValueError('An orchestrator function argument is required.') + raise ValueError("An orchestrator function argument is required.") name = task.get_name(fn) - self.add_named_orchestrator(name, fn) + self.add_named_orchestrator(name, fn, version_name, is_latest) return name - def add_named_orchestrator(self, name: str, fn: task.Orchestrator) -> None: + def add_named_orchestrator(self, name: str, fn: task.Orchestrator, version_name: Optional[str] = None, is_latest: bool = False) -> None: if not name: - raise ValueError('A non-empty orchestrator name is required.') + raise ValueError("A non-empty orchestrator name is required.") + + if version_name is None: + if name in self.orchestrators: + raise ValueError(f"A '{name}' orchestrator already exists.") + self.orchestrators[name] = fn + else: + if name not in self.versioned_orchestrators: + self.versioned_orchestrators[name] = {} + if version_name in self.versioned_orchestrators[name]: + raise ValueError(f"The version '{version_name}' of '{name}' orchestrator already exists.") + self.versioned_orchestrators[name][version_name] = fn + if is_latest: + self.latest_versioned_orchestrators_version_name[name] = version_name + + def get_orchestrator(self, name: str, version_name: Optional[str] = None) -> Optional[tuple[task.Orchestrator, str]]: if name in self.orchestrators: - raise ValueError(f"A '{name}' orchestrator already exists.") + return self.orchestrators.get(name), None + + if name in self.versioned_orchestrators: + if version_name: + version_to_use = version_name + elif name in self.latest_versioned_orchestrators_version_name: + version_to_use = self.latest_versioned_orchestrators_version_name[name] + else: + return None, None - self.orchestrators[name] = fn + if version_to_use not in self.versioned_orchestrators[name]: + raise VersionNotRegisteredException + return self.versioned_orchestrators[name].get(version_to_use), version_to_use - def get_orchestrator(self, name: str) -> Optional[task.Orchestrator]: - return self.orchestrators.get(name) + return None, None def add_activity(self, fn: task.Activity) -> str: if fn is None: - raise ValueError('An activity 
function argument is required.') + raise ValueError("An activity function argument is required.") name = task.get_name(fn) self.add_named_activity(name, fn) @@ -61,7 +153,7 @@ def add_activity(self, fn: task.Activity) -> str: def add_named_activity(self, name: str, fn: task.Activity) -> None: if not name: - raise ValueError('A non-empty activity name is required.') + raise ValueError("A non-empty activity name is required.") if name in self.activities: raise ValueError(f"A '{name}' activity already exists.") @@ -73,30 +165,145 @@ def get_activity(self, name: str) -> Optional[task.Activity]: class OrchestratorNotRegisteredError(ValueError): """Raised when attempting to start an orchestration that is not registered""" + pass class ActivityNotRegisteredError(ValueError): """Raised when attempting to call an activity that is not registered""" + pass +# TODO: refactor this to closely match durabletask-go/client/worker_grpc.go instead of this. class TaskHubGrpcWorker: - _response_stream: Optional[grpc.Future] = None + """A gRPC-based worker for processing durable task orchestrations and activities. + + This worker connects to a Durable Task backend service via gRPC to receive and process + work items including orchestration functions and activity functions. It provides + concurrent execution capabilities with configurable limits and automatic retry handling. + + The worker manages the complete lifecycle: + - Registers orchestrator and activity functions + - Connects to the gRPC backend service + - Receives work items and executes them concurrently + - Handles failures, retries, and state management + - Provides logging and monitoring capabilities + + Args: + host_address (Optional[str], optional): The gRPC endpoint address of the backend service. + Defaults to the value from environment variables or localhost. + metadata (Optional[list[tuple[str, str]]], optional): gRPC metadata to include with + requests. Used for authentication and routing. Defaults to None. + log_handler (optional): Custom logging handler for worker logs. Defaults to None. + log_formatter (Optional[logging.Formatter], optional): Custom log formatter. + Defaults to None. + secure_channel (bool, optional): Whether to use a secure gRPC channel (TLS). + Defaults to False. + interceptors (Optional[Sequence[shared.ClientInterceptor]], optional): Custom gRPC + interceptors to apply to the channel. Defaults to None. + concurrency_options (Optional[ConcurrencyOptions], optional): Configuration for + controlling worker concurrency limits. If None, default settings are used. + stop_timeout (float, optional): Maximum time in seconds to wait for the worker thread + to stop when calling stop(). Defaults to 30.0. Useful to set lower values in tests. + + Attributes: + concurrency_options (ConcurrencyOptions): The current concurrency configuration. + + Example: + Basic worker setup: + + >>> from durabletask.worker import TaskHubGrpcWorker, ConcurrencyOptions + >>> + >>> # Create worker with custom concurrency settings + >>> concurrency = ConcurrencyOptions( + ... maximum_concurrent_activity_work_items=50, + ... maximum_concurrent_orchestration_work_items=20 + ... ) + >>> worker = TaskHubGrpcWorker( + ... host_address="localhost:4001", + ... concurrency_options=concurrency + ... ) + >>> + >>> # Register functions + >>> @worker.add_orchestrator + ... def my_orchestrator(context, input): + ... result = yield context.call_activity("my_activity", input="hello") + ... return result + >>> + >>> @worker.add_activity + ... 
def my_activity(context, input): + ... return f"Processed: {input}" + >>> + >>> # Start the worker + >>> worker.start() + >>> # ... worker runs in background thread + >>> worker.stop() + + Using as context manager: + + >>> with TaskHubGrpcWorker() as worker: + ... worker.add_orchestrator(my_orchestrator) + ... worker.add_activity(my_activity) + ... worker.start() + ... # Worker automatically stops when exiting context + + Raises: + RuntimeError: If attempting to add orchestrators/activities while the worker is running, + or if starting a worker that is already running. + OrchestratorNotRegisteredError: If an orchestration work item references an + unregistered orchestrator function. + ActivityNotRegisteredError: If an activity work item references an unregistered + activity function. + """ - def __init__(self, *, - host_address: Optional[str] = None, - metadata: Optional[List[Tuple[str, str]]] = None, - log_handler=None, - log_formatter: Optional[logging.Formatter] = None, - secure_channel: bool = False): + _response_stream: Optional[grpc.Future] = None + _interceptors: Optional[list[shared.ClientInterceptor]] = None + + def __init__( + self, + *, + host_address: Optional[str] = None, + metadata: Optional[list[tuple[str, str]]] = None, + log_handler=None, + log_formatter: Optional[logging.Formatter] = None, + secure_channel: bool = False, + interceptors: Optional[Sequence[shared.ClientInterceptor]] = None, + concurrency_options: Optional[ConcurrencyOptions] = None, + channel_options: Optional[Sequence[tuple[str, Any]]] = None, + stop_timeout: float = 30.0, + ): self._registry = _Registry() self._host_address = host_address if host_address else shared.get_default_host_address() - self._metadata = metadata self._logger = shared.get_logger("worker", log_handler, log_formatter) self._shutdown = Event() self._is_running = False self._secure_channel = secure_channel + self._channel_options = channel_options + self._stop_timeout = stop_timeout + self._current_channel: Optional[grpc.Channel] = None # Store channel reference for cleanup + + # Use provided concurrency options or create default ones + self._concurrency_options = ( + concurrency_options if concurrency_options is not None else ConcurrencyOptions() + ) + + # Determine the interceptors to use + if interceptors is not None: + self._interceptors = list(interceptors) + if metadata: + self._interceptors.append(DefaultClientInterceptorImpl(metadata)) + elif metadata: + self._interceptors = [DefaultClientInterceptorImpl(metadata)] + else: + self._interceptors = None + + self._async_worker_manager = _AsyncWorkerManager(self._concurrency_options) + + @property + def concurrency_options(self) -> ConcurrencyOptions: + """Get the current concurrency options for this worker.""" + return self._concurrency_options def __enter__(self): return self @@ -107,101 +314,491 @@ def __exit__(self, type, value, traceback): def add_orchestrator(self, fn: task.Orchestrator) -> str: """Registers an orchestrator function with the worker.""" if self._is_running: - raise RuntimeError('Orchestrators cannot be added while the worker is running.') + raise RuntimeError("Orchestrators cannot be added while the worker is running.") return self._registry.add_orchestrator(fn) def add_activity(self, fn: task.Activity) -> str: """Registers an activity function with the worker.""" if self._is_running: - raise RuntimeError('Activities cannot be added while the worker is running.') + raise RuntimeError("Activities cannot be added while the worker is running.") return 
self._registry.add_activity(fn) def start(self): """Starts the worker on a background thread and begins listening for work items.""" - channel = shared.get_grpc_channel(self._host_address, self._metadata, self._secure_channel) - stub = stubs.TaskHubSidecarServiceStub(channel) - if self._is_running: - raise RuntimeError('The worker is already running.') + raise RuntimeError("The worker is already running.") def run_loop(): - # TODO: Investigate whether asyncio could be used to enable greater concurrency for async activity - # functions. We'd need to know ahead of time whether a function is async or not. - # TODO: Max concurrency configuration settings - with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor: - while not self._shutdown.is_set(): - try: - # send a "Hello" message to the sidecar to ensure that it's listening - stub.Hello(empty_pb2.Empty()) - - # stream work items - self._response_stream = stub.GetWorkItems(pb.GetWorkItemsRequest()) - self._logger.info(f'Successfully connected to {self._host_address}. Waiting for work items...') - - # The stream blocks until either a work item is received or the stream is canceled - # by another thread (see the stop() method). - for work_item in self._response_stream: - request_type = work_item.WhichOneof('request') - self._logger.debug(f'Received "{request_type}" work item') - if work_item.HasField('orchestratorRequest'): - executor.submit(self._execute_orchestrator, work_item.orchestratorRequest, stub) - elif work_item.HasField('activityRequest'): - executor.submit(self._execute_activity, work_item.activityRequest, stub) - else: - self._logger.warning(f'Unexpected work item type: {request_type}') - - except grpc.RpcError as rpc_error: - if rpc_error.code() == grpc.StatusCode.CANCELLED: # type: ignore - self._logger.info(f'Disconnected from {self._host_address}') - elif rpc_error.code() == grpc.StatusCode.UNAVAILABLE: # type: ignore - self._logger.warning( - f'The sidecar at address {self._host_address} is unavailable - will continue retrying') - else: - self._logger.warning(f'Unexpected error: {rpc_error}') - except Exception as ex: - self._logger.warning(f'Unexpected error: {ex}') - - # CONSIDER: exponential backoff - self._shutdown.wait(5) - self._logger.info("No longer listening for work items") - return + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + loop.run_until_complete(self._async_run_loop()) self._logger.info(f"Starting gRPC worker that connects to {self._host_address}") - self._runLoop = Thread(target=run_loop) + self._runLoop = Thread(target=run_loop, name="WorkerRunLoop") self._runLoop.start() self._is_running = True + # TODO: refactor this to be more readable and maintainable. + async def _async_run_loop(self): + """ + This is the main async loop that runs the worker. 
+ It is responsible for: + - Creating a fresh connection to the sidecar + - Reading work items from the sidecar + - Executing the work items + - Shutting down the worker + """ + worker_task = asyncio.create_task(self._async_worker_manager.run()) + # Connection state management for retry fix + current_channel = None + current_stub = None + current_reader_thread = None + conn_retry_count = 0 + conn_max_retry_delay = 60 + + def create_fresh_connection(): + nonlocal current_channel, current_stub, conn_retry_count + if current_channel: + try: + current_channel.close() + except Exception: + pass + current_channel = None + current_stub = None + try: + current_channel = shared.get_grpc_channel( + self._host_address, + self._secure_channel, + self._interceptors, + options=self._channel_options, + ) + # Store channel reference for cleanup in stop() + self._current_channel = current_channel + current_stub = stubs.TaskHubSidecarServiceStub(current_channel) + current_stub.Hello(empty_pb2.Empty()) + conn_retry_count = 0 + self._logger.info(f"Created fresh connection to {self._host_address}") + except Exception as e: + self._logger.warning(f"Failed to create connection: {e}") + current_channel = None + self._current_channel = None + current_stub = None + raise + + def invalidate_connection(): + nonlocal current_channel, current_stub, current_reader_thread + # Cancel the response stream first to signal the reader thread to stop + if self._response_stream is not None: + try: + self._response_stream.cancel() + except Exception: + pass + self._response_stream = None + + # Wait for the reader thread to finish + if current_reader_thread is not None: + current_reader_thread.join(timeout=1) + current_reader_thread = None + + # Close the channel + if current_channel: + try: + current_channel.close() + except Exception: + pass + current_channel = None + self._current_channel = None + current_stub = None + + def should_invalidate_connection(rpc_error): + error_code = rpc_error.code() # type: ignore + connection_level_errors = { + grpc.StatusCode.UNAVAILABLE, + grpc.StatusCode.DEADLINE_EXCEEDED, + grpc.StatusCode.CANCELLED, + grpc.StatusCode.UNAUTHENTICATED, + grpc.StatusCode.ABORTED, + } + return error_code in connection_level_errors + + while True: + if self._shutdown.is_set(): + break + if current_stub is None: + try: + create_fresh_connection() + except Exception: + conn_retry_count += 1 + delay = min( + conn_max_retry_delay, + (2 ** min(conn_retry_count, 6)) + random.uniform(0, 1), + ) + self._logger.warning( + f"Connection failed, retrying in {delay:.2f} seconds (attempt {conn_retry_count})" + ) + if self._shutdown.wait(delay): + break + continue + try: + assert current_stub is not None + stub = current_stub + get_work_items_request = pb.GetWorkItemsRequest( + maxConcurrentOrchestrationWorkItems=self._concurrency_options.maximum_concurrent_orchestration_work_items, + maxConcurrentActivityWorkItems=self._concurrency_options.maximum_concurrent_activity_work_items, + ) + self._response_stream = stub.GetWorkItems(get_work_items_request) + self._logger.info( + f"Successfully connected to {self._host_address}. Waiting for work items..." 
+ ) + + # Use a thread to read from the blocking gRPC stream and forward to asyncio + import queue + + work_item_queue = queue.Queue() + SHUTDOWN_SENTINEL = None + + # NOTE: This is equivalent to the Durabletask Go goroutine calling stream.Recv() in worker_grpc.go StartWorkItemListener() + def stream_reader(): + try: + stream = self._response_stream + + # Use next() to allow shutdown check between items + # This matches Go's pattern: check ctx.Err() after each stream.Recv() + while True: + if self._shutdown.is_set(): + break + + try: + # NOTE: next(stream) blocks until gRPC returns the next work item or cancels the stream. + # There is no way to interrupt this blocking call in Python gRPC. When shutdown is + # initiated, the channel closure propagates to this call, which can take several seconds. + # The thread will exit once gRPC raises grpc.RpcError with StatusCode.CANCELLED. + work_item = next(stream) + # Check shutdown again after getting item (in case shutdown happened during next()) + if self._shutdown.is_set(): + break + work_item_queue.put(work_item) + except StopIteration: + # stream ended naturally + break + except grpc.RpcError as rpc_error: + # Check if this is due to shutdown/cancellation + if ( + self._shutdown.is_set() + or rpc_error.code() == grpc.StatusCode.CANCELLED + ): + self._logger.debug( + f"Stream reader: stream cancelled during shutdown (code={rpc_error.code()})" + ) + break + # Other RPC errors - put in queue for async loop to handle + self._logger.warning( + f"Stream reader: RPC error (code={rpc_error.code()}): {rpc_error}" + ) + break + except Exception as stream_error: + # Check if this is due to shutdown + if self._shutdown.is_set(): + self._logger.debug( + f"Stream reader: exception during shutdown: {type(stream_error).__name__}: {stream_error}" + ) + # Other stream errors - put in queue for async loop to handle + self._logger.warning( + f"Stream reader: unexpected error: {stream_error}" + ) + raise + + except Exception as e: + if not self._shutdown.is_set(): + work_item_queue.put(e) + finally: + # signal that the stream reader is done (ie matching Go's context cancellation) + try: + work_item_queue.put(SHUTDOWN_SENTINEL) + except Exception: + # queue might be closed so ignore this + pass + + import threading + + # Use non-daemon thread (daemon=False) to ensure proper resource cleanup. + # Daemon threads exit immediately when the main program exits, which prevents + # cleanup of gRPC channel resources and OTel interceptors. Non-daemon threads + # block shutdown until they complete, ensuring all resources are properly closed. 
current_reader_thread = threading.Thread( + target=stream_reader, daemon=False, name="StreamReader" + ) + current_reader_thread.start() + loop = asyncio.get_running_loop() + + # NOTE: This is a blocking call that will wait for a work item to become available or the shutdown sentinel + while not self._shutdown.is_set(): + try: + # Use timeout to allow shutdown check (mimicking Go's select with ctx.Done()) + work_item = await loop.run_in_executor( + None, lambda: work_item_queue.get(timeout=0.1) + ) + # Essentially check for ctx.Done() in Go + if work_item == SHUTDOWN_SENTINEL: + break + + if self._async_worker_manager._shutdown or loop.is_closed(): + self._logger.debug( + "Async worker manager shut down or loop closed, exiting work item processing" + ) + break + if isinstance(work_item, Exception): + raise work_item + request_type = work_item.WhichOneof("request") + self._logger.debug(f'Received "{request_type}" work item') + if work_item.HasField("orchestratorRequest"): + self._async_worker_manager.submit_orchestration( + self._execute_orchestrator, + work_item.orchestratorRequest, + stub, + work_item.completionToken, + ) + elif work_item.HasField("activityRequest"): + self._async_worker_manager.submit_activity( + self._execute_activity, + work_item.activityRequest, + stub, + work_item.completionToken, + ) + elif work_item.HasField("healthPing"): + pass + else: + self._logger.warning(f"Unexpected work item type: {request_type}") + except queue.Empty: + continue + except grpc.RpcError: + raise # let it be captured/parsed by outer except and avoid noisy log + except Exception as e: + if self._async_worker_manager._shutdown or loop.is_closed(): + break + invalidate_connection() + raise e + current_reader_thread.join(timeout=1) + + if self._shutdown.is_set(): + self._logger.info(f"Disconnected from {self._host_address}") + else: + self._logger.info("Work item stream ended normally") + # When stream ends (SHUTDOWN_SENTINEL received), always break outer loop + # The stream reader has exited, so we should exit too, not reconnect + # This matches Go SDK behavior where stream ending causes the listener to exit + break + except grpc.RpcError as rpc_error: + # Check shutdown first - if shutting down, exit immediately + if self._shutdown.is_set(): + self._logger.debug("Shutdown detected during RPC error handling, exiting") + break + should_invalidate = should_invalidate_connection(rpc_error) + if should_invalidate: + invalidate_connection() + error_code = rpc_error.code() # type: ignore + error_details = str(rpc_error) + + if error_code == grpc.StatusCode.CANCELLED: + self._logger.info(f"Disconnected from {self._host_address}") + break + elif error_code == grpc.StatusCode.UNAVAILABLE: + # Check if this is a connection timeout scenario + if ( + "Timeout occurred" in error_details + or "Failed to connect to remote host" in error_details + ): + self._logger.warning( + f"Connection timeout to {self._host_address}: {error_details} - will retry with fresh connection" + ) + else: + self._logger.warning( + f"The sidecar at address {self._host_address} is unavailable: {error_details} - will continue retrying" + ) + elif should_invalidate: + self._logger.warning( + f"Connection-level gRPC error ({error_code}): {rpc_error} - resetting connection" + ) + else: + self._logger.warning( + f"Application-level gRPC error ({error_code}): {rpc_error}" + ) + except RuntimeError as ex: + # RuntimeError often indicates asyncio loop issues (e.g., "cannot schedule new futures after shutdown") + # Check shutdown state first +
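The reader-thread pattern above is dense, so here is a distilled, self-contained sketch of the same idea, with hypothetical names and a plain iterator standing in for the blocking gRPC stream. The `reconnect_delay` helper mirrors the capped exponential backoff with jitter used by the reconnect loop earlier in this function, but the helper itself is illustrative, not part of this PR:

```python
import asyncio
import queue
import random
import threading

SENTINEL = None  # same trick as SHUTDOWN_SENTINEL above


def reconnect_delay(attempt: int, max_delay: float = 60.0) -> float:
    """Capped exponential backoff with jitter, mirroring the reconnect loop above."""
    return min(max_delay, (2 ** min(attempt, 6)) + random.uniform(0, 1))


def drain_stream(stream, out: queue.Queue, shutdown: threading.Event) -> None:
    """Runs on a plain thread: drains a blocking iterator into a thread-safe queue."""
    try:
        for item in stream:  # stands in for next(grpc_stream), which blocks
            if shutdown.is_set():
                break
            out.put(item)
    finally:
        out.put(SENTINEL)  # always wake the async consumer, even on error


async def consume(out: queue.Queue, shutdown: threading.Event) -> None:
    loop = asyncio.get_running_loop()
    while not shutdown.is_set():
        try:
            # The short timeout keeps the loop responsive to shutdown,
            # much like Go's select on ctx.Done().
            item = await loop.run_in_executor(None, lambda: out.get(timeout=0.1))
        except queue.Empty:
            continue
        if item is SENTINEL:
            break
        print("work item:", item)


async def main() -> None:
    shutdown = threading.Event()
    q: queue.Queue = queue.Queue()
    reader = threading.Thread(target=drain_stream, args=(iter(range(3)), q, shutdown))
    reader.start()
    await consume(q, shutdown)
    reader.join()
    print("first retry would wait", round(reconnect_delay(1), 2), "seconds")


asyncio.run(main())
```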
if self._shutdown.is_set(): + self._logger.debug( + f"Shutdown detected during RuntimeError handling, exiting: {ex}" + ) + break + # Check if async worker manager is shut down or loop is closed + try: + loop = asyncio.get_running_loop() + if self._async_worker_manager._shutdown or loop.is_closed(): + self._logger.debug( + f"Async worker manager shut down or loop closed, exiting: {ex}" + ) + break + except RuntimeError: + # No event loop running, treat as shutdown + self._logger.debug(f"No event loop running, exiting: {ex}") + break + # If we can't get the loop or it's in a bad state, and we got a RuntimeError, + # it's likely shutdown-related. Break to prevent infinite retries. + break + except Exception as ex: + if self._shutdown.is_set(): + self._logger.debug( + f"Shutdown detected during exception handling, exiting: {ex}" + ) + break + # Check if async worker manager is shut down or loop is closed + try: + loop = asyncio.get_running_loop() + if self._async_worker_manager._shutdown or loop.is_closed(): + self._logger.debug( + f"Async worker manager shut down or loop closed, exiting: {ex}" + ) + break + except RuntimeError: + # No event loop running, treat as shutdown + self._logger.debug(f"No event loop running, exiting: {ex}") + break + invalidate_connection() + self._logger.warning(f"Unexpected error: {ex}") + invalidate_connection() + self._logger.info("No longer listening for work items") + + # Cancel worker_task to ensure shutdown completes even if tasks are still running + worker_task.cancel() + try: + # Wait for cancellation to complete, with a timeout to prevent indefinite waiting + await asyncio.wait_for(worker_task, timeout=5.0) + except asyncio.CancelledError: + self._logger.debug("Worker task cancelled during shutdown") + except asyncio.TimeoutError: + self._logger.warning("Worker task did not complete within timeout during shutdown") + except Exception as e: + self._logger.warning(f"Error while waiting for worker task shutdown: {e}") + def stop(self): """Stops the worker and waits for any pending work items to complete.""" if not self._is_running: return self._logger.info("Stopping gRPC worker...") - self._shutdown.set() if self._response_stream is not None: self._response_stream.cancel() + self._shutdown.set() + # Explicitly close the gRPC channel to ensure OTel interceptors and other resources are cleaned up + if self._current_channel is not None: + try: + self._current_channel.close() + except Exception as e: + self._logger.exception(f"Error closing gRPC channel: {e}") + finally: + self._current_channel = None + if self._runLoop is not None: - self._runLoop.join(timeout=30) + self._runLoop.join(timeout=self._stop_timeout) + if self._runLoop.is_alive(): + self._logger.warning( + f"Worker thread did not complete within {self._stop_timeout}s timeout. " + "Some resources may not be fully cleaned up." + ) + else: + self._logger.debug("Worker thread completed successfully") + + self._async_worker_manager.shutdown() self._logger.info("Worker shutdown completed") self._is_running = False - def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHubSidecarServiceStub): + # TODO: This should be removed in the future as we do handle gRPC errors + def _handle_grpc_execution_error(self, rpc_error: grpc.RpcError, request_type: str): + """Handle a gRPC execution error during shutdown or benign condition.""" + # During shutdown or if the instance was terminated, the channel may be closed + # or the instance may no longer be recognized by the sidecar.
Treat these as benign + # to reduce noisy logging when shutting down. + details = str(rpc_error).lower() + benign_errors = { + grpc.StatusCode.CANCELLED, + grpc.StatusCode.UNAVAILABLE, + grpc.StatusCode.UNKNOWN, + } + if ( + self._shutdown.is_set() + and rpc_error.code() in benign_errors + or ( + "unknown instance id/task id combo" in details + or "channel closed" in details + or "locally cancelled by application" in details + ) + ): + self._logger.debug( + f"Ignoring gRPC {request_type} execution error during shutdown/benign condition: {rpc_error}" + ) + else: + self._logger.exception( + f"Unexpected gRPC error during {request_type} execution: {rpc_error}" + ) + + def _execute_orchestrator( + self, + req: pb.OrchestratorRequest, + stub: stubs.TaskHubSidecarServiceStub, + completionToken, + ): try: executor = _OrchestrationExecutor(self._registry, self._logger) result = executor.execute(req.instanceId, req.pastEvents, req.newEvents) - res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=result.actions, customStatus=wrappers_pb2.StringValue(value=result.custom_status)) + + version = None + if result.version_name: + version = version or pb.OrchestrationVersion() + version.name = result.version_name + if result.patches: + version = version or pb.OrchestrationVersion() + version.patches.extend(result.patches) + + + res = pb.OrchestratorResponse( + instanceId=req.instanceId, + actions=result.actions, + customStatus=ph.get_string_value(result.encoded_custom_status), + completionToken=completionToken, + version=version, + ) except Exception as ex: - self._logger.exception(f"An error occurred while trying to execute instance '{req.instanceId}': {ex}") - failure_details = pbh.new_failure_details(ex) - actions = [pbh.new_complete_orchestration_action(-1, pb.ORCHESTRATION_STATUS_FAILED, "", failure_details)] - res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=actions) + self._logger.exception( + f"An error occurred while trying to execute instance '{req.instanceId}': {ex}" + ) + failure_details = ph.new_failure_details(ex) + actions = [ + ph.new_complete_orchestration_action( + -1, pb.ORCHESTRATION_STATUS_FAILED, "", failure_details + ) + ] + res = pb.OrchestratorResponse( + instanceId=req.instanceId, + actions=actions, + completionToken=completionToken, + ) try: stub.CompleteOrchestratorTask(res) + except grpc.RpcError as rpc_error: # type: ignore + self._handle_grpc_execution_error(rpc_error, "orchestrator") except Exception as ex: - self._logger.exception(f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}") - - def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarServiceStub): + self._logger.exception( + f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}" + ) + + def _execute_activity( + self, + req: pb.ActivityRequest, + stub: stubs.TaskHubSidecarServiceStub, + completionToken, + ): instance_id = req.orchestrationInstance.instanceId try: executor = _ActivityExecutor(self._registry, self._logger) @@ -209,40 +806,56 @@ def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarS res = pb.ActivityResponse( instanceId=instance_id, taskId=req.taskId, - result=pbh.get_string_value(result)) + result=ph.get_string_value(result), + completionToken=completionToken, + ) except Exception as ex: res = pb.ActivityResponse( instanceId=instance_id, taskId=req.taskId, - failureDetails=pbh.new_failure_details(ex)) + failureDetails=ph.new_failure_details(ex), +
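For orientation, a minimal usage sketch of the worker lifecycle implemented above. The constructor keyword `host_address` is an assumption based on the `self._host_address` field in this diff, not a confirmed signature:

```python
import logging

from durabletask import worker

logging.basicConfig(level=logging.INFO)

# host_address is assumed; the sidecar examples in this repo listen on port 4001
w = worker.TaskHubGrpcWorker(host_address="localhost:4001")
# ... register orchestrators and activities here ...
w.start()  # spawns the background thread that runs the asyncio run loop
try:
    input("Press Enter to stop the worker...\n")
finally:
    # Cancels the stream, closes the channel, and joins the run loop
    # within the configured stop timeout, as implemented above.
    w.stop()
```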
completionToken=completionToken, + ) try: stub.CompleteActivityTask(res) + except grpc.RpcError as rpc_error: # type: ignore + self._handle_grpc_execution_error(rpc_error, "activity") except Exception as ex: self._logger.exception( - f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}") + f"Failed to deliver activity response for '{req.name}#{req.taskId}' of orchestration ID '{instance_id}' to sidecar: {ex}" + ) -class _RuntimeOrchestrationContext(task.OrchestrationContext): +class _RuntimeOrchestrationContext( + task.OrchestrationContext, deterministic.DeterministicContextMixin +): _generator: Optional[Generator[task.Task, Any, Any]] _previous_task: Optional[task.Task] def __init__(self, instance_id: str): + super().__init__() self._generator = None self._is_replaying = True self._is_complete = False self._result = None - self._pending_actions: Dict[int, pb.OrchestratorAction] = {} - self._pending_tasks: Dict[int, task.CompletableTask] = {} + self._pending_actions: dict[int, pb.OrchestratorAction] = {} + self._pending_tasks: dict[int, task.CompletableTask] = {} self._sequence_number = 0 self._current_utc_datetime = datetime(1000, 1, 1) self._instance_id = instance_id + self._app_id = None self._completion_status: Optional[pb.OrchestrationStatus] = None - self._received_events: Dict[str, List[Any]] = {} - self._pending_events: Dict[str, List[task.CompletableTask]] = {} + self._received_events: dict[str, list[Any]] = {} + self._pending_events: dict[str, list[task.CompletableTask]] = {} self._new_input: Optional[Any] = None self._save_events = False - self._custom_status: str = "" + self._encoded_custom_status: Optional[str] = None + self._orchestrator_version_name: Optional[str] = None + self._version_name: Optional[str] = None + self._history_patches: dict[str, bool] = {} + self._applied_patches: dict[str, bool] = {} + self._encountered_patches: list[str] = [] def run(self, generator: Generator[task.Task, Any, Any]): self._generator = generator @@ -254,7 +867,9 @@ def run(self, generator: Generator[task.Task, Any, Any]): def resume(self): if self._generator is None: # This is never expected unless maybe there's an issue with the history - raise TypeError("The orchestrator generator is not initialized! Was the orchestration history corrupted?") + raise TypeError( + "The orchestrator generator is not initialized! Was the orchestration history corrupted?" + ) # We can resume the generator only if the previously yielded task # has reached a completed state. 
The only time this won't be the @@ -275,7 +890,12 @@ def resume(self): raise TypeError("The orchestrator generator yielded a non-Task object") self._previous_task = next_task - def set_complete(self, result: Any, status: pb.OrchestrationStatus, is_result_encoded: bool = False): + def set_complete( + self, + result: Any, + status: pb.OrchestrationStatus, + is_result_encoded: bool = False, + ): if self._is_complete: return @@ -288,7 +908,11 @@ def set_complete(self, result: Any, status: pb.OrchestrationStatus, is_result_en if result is not None: result_json = result if is_result_encoded else shared.to_json(result) action = ph.new_complete_orchestration_action( - self.next_sequence_number(), status, result_json) + self.next_sequence_number(), + status, + result_json, + router=pb.TaskRouter(sourceAppID=self._app_id) if self._app_id else None, + ) self._pending_actions[action.id] = action def set_failed(self, ex: Exception): @@ -300,10 +924,22 @@ def set_failed(self, ex: Exception): self._completion_status = pb.ORCHESTRATION_STATUS_FAILED action = ph.new_complete_orchestration_action( - self.next_sequence_number(), pb.ORCHESTRATION_STATUS_FAILED, None, ph.new_failure_details(ex) + self.next_sequence_number(), + pb.ORCHESTRATION_STATUS_FAILED, + None, + ph.new_failure_details(ex), + router=pb.TaskRouter(sourceAppID=self._app_id) if self._app_id else None, ) self._pending_actions[action.id] = action + + def set_version_not_registered(self): + self._pending_actions.clear() + self._completion_status = pb.ORCHESTRATION_STATUS_STALLED + action = ph.new_orchestrator_version_not_available_action(self.next_sequence_number()) + self._pending_actions[action.id] = action + + def set_continued_as_new(self, new_input: Any, save_events: bool): if self._is_complete: return @@ -314,10 +950,10 @@ def set_continued_as_new(self, new_input: Any, save_events: bool): self._new_input = new_input self._save_events = save_events - def get_actions(self) -> List[pb.OrchestratorAction]: + def get_actions(self) -> list[pb.OrchestratorAction]: if self._completion_status == pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW: # When continuing-as-new, we only return a single completion action. 
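As a reference point for the continue-as-new branch above, a sketch of an orchestrator that takes this path (an eternal orchestration that restarts itself with fresh history; `save_events=True` carries buffered external events into the new instance):

```python
from datetime import timedelta

from durabletask import task


def counter(ctx: task.OrchestrationContext, count: int):
    # Do one unit of work per "generation" of the orchestration.
    yield ctx.create_timer(timedelta(hours=1))
    # Restart with a new input instead of growing the history forever.
    ctx.continue_as_new(count + 1, save_events=True)
```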
- carryover_events: Optional[List[pb.HistoryEvent]] = None + carryover_events: Optional[list[pb.HistoryEvent]] = None if self._save_events: carryover_events = [] # We need to save the current set of pending events so that they can be @@ -325,13 +961,17 @@ def get_actions(self) -> List[pb.OrchestratorAction]: for event_name, values in self._received_events.items(): for event_value in values: encoded_value = shared.to_json(event_value) if event_value else None - carryover_events.append(ph.new_event_raised_event(event_name, encoded_value)) + carryover_events.append( + ph.new_event_raised_event(event_name, encoded_value) + ) action = ph.new_complete_orchestration_action( self.next_sequence_number(), pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW, result=shared.to_json(self._new_input) if self._new_input is not None else None, failure_details=None, - carryover_events=carryover_events) + carryover_events=carryover_events, + router=pb.TaskRouter(sourceAppID=self._app_id) if self._app_id else None, + ) return [action] else: return list(self._pending_actions.values()) @@ -340,6 +980,10 @@ def next_sequence_number(self) -> int: self._sequence_number += 1 return self._sequence_number + @property + def app_id(self) -> str: + return self._app_id + @property def instance_id(self) -> str: return self._instance_id @@ -348,63 +992,100 @@ def instance_id(self) -> str: def current_utc_datetime(self) -> datetime: return self._current_utc_datetime - @property - def is_replaying(self) -> bool: - return self._is_replaying - @current_utc_datetime.setter def current_utc_datetime(self, value: datetime): self._current_utc_datetime = value - def set_custom_status(self, custom_status: str) -> None: - self._custom_status = custom_status + @property + def is_replaying(self) -> bool: + return self._is_replaying + + def set_custom_status(self, custom_status: Any) -> None: + self._encoded_custom_status = ( + shared.to_json(custom_status) if custom_status is not None else None + ) def create_timer(self, fire_at: Union[datetime, timedelta]) -> task.Task: return self.create_timer_internal(fire_at) - def create_timer_internal(self, fire_at: Union[datetime, timedelta], - retryable_task: Optional[task.RetryableTask] = None) -> task.Task: + def create_timer_internal( + self, + fire_at: Union[datetime, timedelta], + retryable_task: Optional[task.RetryableTask] = None, + ) -> task.Task: id = self.next_sequence_number() if isinstance(fire_at, timedelta): fire_at = self.current_utc_datetime + fire_at action = ph.new_create_timer_action(id, fire_at) self._pending_actions[id] = action - timer_task = task.TimerTask() + timer_task: task.TimerTask = task.TimerTask() if retryable_task is not None: timer_task.set_retryable_parent(retryable_task) self._pending_tasks[id] = timer_task return timer_task - def call_activity(self, activity: Union[task.Activity[TInput, TOutput], str], *, - input: Optional[TInput] = None, - retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]: + def call_activity( + self, + activity: Union[task.Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[task.RetryPolicy] = None, + app_id: Optional[str] = None, + ) -> task.Task[TOutput]: id = self.next_sequence_number() - self.call_activity_function_helper(id, activity, input=input, retry_policy=retry_policy, - is_sub_orch=False) + self.call_activity_function_helper( + id, activity, input=input, retry_policy=retry_policy, is_sub_orch=False, app_id=app_id + ) return self._pending_tasks.get(id, task.CompletableTask()) - 
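Note that `set_custom_status` above now accepts any JSON-serializable value rather than only a string (it is encoded with `shared.to_json` before being returned to the sidecar). A small sketch, with illustrative activity and event names:

```python
from durabletask import task


def order_workflow(ctx: task.OrchestrationContext, order: dict):
    # Any JSON-serializable value works now, not just strings.
    ctx.set_custom_status({"stage": "approval", "attempts": 1})
    approved = yield ctx.wait_for_external_event("approval")
    ctx.set_custom_status({"stage": "done"})
    return {"approved": approved}
```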
def call_sub_orchestrator(self, orchestrator: task.Orchestrator[TInput, TOutput], *, - input: Optional[TInput] = None, - instance_id: Optional[str] = None, - retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]: + def call_sub_orchestrator( + self, + orchestrator: Union[task.Orchestrator[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + instance_id: Optional[str] = None, + retry_policy: Optional[task.RetryPolicy] = None, + app_id: Optional[str] = None, + ) -> task.Task[TOutput]: id = self.next_sequence_number() - orchestrator_name = task.get_name(orchestrator) - self.call_activity_function_helper(id, orchestrator_name, input=input, retry_policy=retry_policy, - is_sub_orch=True, instance_id=instance_id) + if isinstance(orchestrator, str): + orchestrator_name = orchestrator + else: + orchestrator_name = task.get_name(orchestrator) + self.call_activity_function_helper( + id, + orchestrator_name, + input=input, + retry_policy=retry_policy, + is_sub_orch=True, + instance_id=instance_id, + app_id=app_id, + ) return self._pending_tasks.get(id, task.CompletableTask()) - def call_activity_function_helper(self, id: Optional[int], - activity_function: Union[task.Activity[TInput, TOutput], str], *, - input: Optional[TInput] = None, - retry_policy: Optional[task.RetryPolicy] = None, - is_sub_orch: bool = False, - instance_id: Optional[str] = None, - fn_task: Optional[task.CompletableTask[TOutput]] = None): + def call_activity_function_helper( + self, + id: Optional[int], + activity_function: Union[task.Activity[TInput, TOutput], str], + *, + input: Optional[TInput] = None, + retry_policy: Optional[task.RetryPolicy] = None, + is_sub_orch: bool = False, + instance_id: Optional[str] = None, + fn_task: Optional[task.CompletableTask[TOutput]] = None, + app_id: Optional[str] = None, + ): if id is None: id = self.next_sequence_number() + router = pb.TaskRouter() + if self._app_id is not None: + router.sourceAppID = self._app_id + if app_id is not None: + router.targetAppID = app_id + if fn_task is None: encoded_input = shared.to_json(input) if input is not None else None else: @@ -412,24 +1093,33 @@ def call_activity_function_helper(self, id: Optional[int], # We just need to take string representation of it. 
encoded_input = str(input) if not is_sub_orch: - name = activity_function if isinstance(activity_function, str) else task.get_name(activity_function) - action = ph.new_schedule_task_action(id, name, encoded_input) + name = ( + activity_function + if isinstance(activity_function, str) + else task.get_name(activity_function) + ) + action = ph.new_schedule_task_action(id, name, encoded_input, router) else: if instance_id is None: # Create a deteministic instance ID based on the parent instance ID instance_id = f"{self.instance_id}:{id:04x}" if not isinstance(activity_function, str): raise ValueError("Orchestrator function name must be a string") - action = ph.new_create_sub_orchestration_action(id, activity_function, instance_id, encoded_input) + action = ph.new_create_sub_orchestration_action( + id, activity_function, instance_id, encoded_input, router + ) self._pending_actions[id] = action if fn_task is None: if retry_policy is None: fn_task = task.CompletableTask[TOutput]() else: - fn_task = task.RetryableTask[TOutput](retry_policy=retry_policy, action=action, - start_time=self.current_utc_datetime, - is_sub_orch=is_sub_orch) + fn_task = task.RetryableTask[TOutput]( + retry_policy=retry_policy, + action=action, + start_time=self.current_utc_datetime, + is_sub_orch=is_sub_orch, + ) self._pending_tasks[id] = fn_task def wait_for_external_event(self, name: str) -> task.Task: @@ -438,7 +1128,7 @@ def wait_for_external_event(self, name: str) -> task.Task: # event with the given name so that we can resume the generator when it # arrives. If there are multiple events with the same name, we return # them in the order they were received. - external_event_task = task.CompletableTask() + external_event_task: task.CompletableTask = task.CompletableTask() event_name = name.casefold() event_list = self._received_events.get(event_name, None) if event_list: @@ -461,13 +1151,39 @@ def continue_as_new(self, new_input, *, save_events: bool = False) -> None: self.set_continued_as_new(new_input, save_events) + def is_patched(self, patch_name: str) -> bool: + is_patched = self._is_patched(patch_name) + if is_patched: + self._encountered_patches.append(patch_name) + return is_patched + + def _is_patched(self, patch_name: str) -> bool: + if patch_name in self._applied_patches: + return self._applied_patches[patch_name] + if patch_name in self._history_patches: + self._applied_patches[patch_name] = True + return True + + if self._is_replaying: + self._applied_patches[patch_name] = False + return False + + self._applied_patches[patch_name] = True + return True + + class ExecutionResults: - actions: List[pb.OrchestratorAction] - custom_status: str + actions: list[pb.OrchestratorAction] + encoded_custom_status: Optional[str] + version_name: Optional[str] + patches: Optional[list[str]] - def __init__(self, actions: List[pb.OrchestratorAction], custom_status: str): + def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str], version_name: Optional[str] = None, patches: Optional[list[str]] = None): self.actions = actions - self.custom_status = custom_status + self.encoded_custom_status = encoded_custom_status + self.version_name = version_name + self.patches = patches + class _OrchestrationExecutor: _generator: Optional[task.Orchestrator] = None @@ -476,16 +1192,25 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._registry = registry self._logger = logger self._is_suspended = False - self._suspended_events: List[pb.HistoryEvent] = [] - - def execute(self, 
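The `is_patched` API above enables safe code changes for in-flight instances: the first non-replay check records the patch and returns True, while replays of instances whose history predates the patch keep returning False, so old instances keep taking their original code path. A sketch with illustrative names:

```python
from durabletask import task


def greeting(ctx: task.OrchestrationContext, name: str):
    if ctx.is_patched("use-v2-greeting"):
        # New instances (and replays whose history recorded the patch)
        # take this path.
        result = yield ctx.call_activity("say_hello_v2", input=name)
    else:
        # Instances started before the patch keep replaying the old path.
        result = yield ctx.call_activity("say_hello", input=name)
    return result
```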
instance_id: str, old_events: Sequence[pb.HistoryEvent], new_events: Sequence[pb.HistoryEvent]) -> ExecutionResults: + self._suspended_events: list[pb.HistoryEvent] = [] + + def execute( + self, + instance_id: str, + old_events: Sequence[pb.HistoryEvent], + new_events: Sequence[pb.HistoryEvent], + ) -> ExecutionResults: if not new_events: - raise task.OrchestrationStateError("The new history event list must have at least one event in it.") + raise task.OrchestrationStateError( + "The new history event list must have at least one event in it." + ) ctx = _RuntimeOrchestrationContext(instance_id) try: # Rebuild local state by replaying old history into the orchestrator function - self._logger.debug(f"{instance_id}: Rebuilding local state with {len(old_events)} history event...") + self._logger.debug( + f"{instance_id}: Rebuilding local state with {len(old_events)} history event..." + ) ctx._is_replaying = True for old_event in old_events: self.process_event(ctx, old_event) @@ -493,11 +1218,15 @@ def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_e # Get new actions by executing newly received events into the orchestrator function if self._logger.level <= logging.DEBUG: summary = _get_new_event_summary(new_events) - self._logger.debug(f"{instance_id}: Processing {len(new_events)} new event(s): {summary}") + self._logger.debug( + f"{instance_id}: Processing {len(new_events)} new event(s): {summary}" + ) ctx._is_replaying = False for new_event in new_events: self.process_event(ctx, new_event) + except VersionNotRegisteredException: + ctx.set_version_not_registered() except Exception as ex: # Unhandled exceptions fail the orchestration ctx.set_failed(ex) @@ -505,15 +1234,29 @@ def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_e if not ctx._is_complete: task_count = len(ctx._pending_tasks) event_count = len(ctx._pending_events) - self._logger.info(f"{instance_id}: Orchestrator yielded with {task_count} task(s) and {event_count} event(s) outstanding.") - elif ctx._completion_status and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW: - completion_status_str = pbh.get_orchestration_status_str(ctx._completion_status) - self._logger.info(f"{instance_id}: Orchestration completed with status: {completion_status_str}") + self._logger.info( + f"{instance_id}: Orchestrator yielded with {task_count} task(s) and {event_count} event(s) outstanding." 
+ ) + elif ( + ctx._completion_status + and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW + ): + completion_status_str = ph.get_orchestration_status_str(ctx._completion_status) + self._logger.info( + f"{instance_id}: Orchestration completed with status: {completion_status_str}" + ) actions = ctx.get_actions() if self._logger.level <= logging.DEBUG: - self._logger.debug(f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}") - return ExecutionResults(actions=actions, custom_status=ctx._custom_status) + self._logger.debug( + f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}" + ) + return ExecutionResults( + actions=actions, + encoded_custom_status=ctx._encoded_custom_status, + version_name=getattr(ctx, '_version_name', None), + patches=ctx._encountered_patches + ) def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent) -> None: if self._is_suspended and _is_suspendable(event): @@ -525,16 +1268,39 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven try: if event.HasField("orchestratorStarted"): ctx.current_utc_datetime = event.timestamp.ToDatetime() + if event.orchestratorStarted.version: + if event.orchestratorStarted.version.name: + ctx._orchestrator_version_name = event.orchestratorStarted.version.name + for patch in event.orchestratorStarted.version.patches: + ctx._history_patches[patch] = True elif event.HasField("executionStarted"): + if event.router.targetAppID: + ctx._app_id = event.router.targetAppID + else: + ctx._app_id = event.router.sourceAppID + + version_name = None + if ctx._orchestrator_version_name: + version_name = ctx._orchestrator_version_name + + # TODO: Check if we already started the orchestration - fn = self._registry.get_orchestrator(event.executionStarted.name) + fn, version_used = self._registry.get_orchestrator(event.executionStarted.name, version_name=version_name) + if fn is None: raise OrchestratorNotRegisteredError( - f"A '{event.executionStarted.name}' orchestrator was not registered.") + f"A '{event.executionStarted.name}' orchestrator was not registered." + ) + + if version_used is not None: + ctx._version_name = version_used # deserialize the input, if any input = None - if event.executionStarted.input is not None and event.executionStarted.input.value != "": + if ( + event.executionStarted.input is not None + and event.executionStarted.input.value != "" + ): input = shared.from_json(event.executionStarted.input.value) result = fn(ctx, input) # this does not execute the generator, only creates it @@ -561,7 +1327,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx._is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected timerFired event with ID = {timer_id}.") + f"{ctx.instance_id}: Ignoring unexpected timerFired event with ID = {timer_id}." 
+ ) return timer_task.complete(None) if timer_task._retryable_parent is not None: @@ -573,12 +1340,21 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven else: cur_task = activity_action.createSubOrchestration instance_id = cur_task.instanceId - ctx.call_activity_function_helper(id=activity_action.id, activity_function=cur_task.name, - input=cur_task.input.value, - retry_policy=timer_task._retryable_parent._retry_policy, - is_sub_orch=timer_task._retryable_parent._is_sub_orch, - instance_id=instance_id, - fn_task=timer_task._retryable_parent) + if cur_task.router and cur_task.router.targetAppID: + target_app_id = cur_task.router.targetAppID + else: + target_app_id = None + + ctx.call_activity_function_helper( + id=activity_action.id, + activity_function=cur_task.name, + input=cur_task.input.value, + retry_policy=timer_task._retryable_parent._retry_policy, + is_sub_orch=timer_task._retryable_parent._is_sub_orch, + instance_id=instance_id, + fn_task=timer_task._retryable_parent, + app_id=target_app_id, + ) else: ctx.resume() elif event.HasField("taskScheduled"): @@ -597,7 +1373,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven task_id, method_name=task.get_name(ctx.call_activity), expected_task_name=event.taskScheduled.name, - actual_task_name=action.scheduleTask.name) + actual_task_name=action.scheduleTask.name, + ) elif event.HasField("taskCompleted"): # This history event contains the result of a completed activity task. task_id = event.taskCompleted.taskScheduledId @@ -606,7 +1383,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected taskCompleted event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected taskCompleted event with ID = {task_id}." + ) return result = None if not ph.is_empty(event.taskCompleted.result): @@ -620,24 +1398,37 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected taskFailed event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected taskFailed event with ID = {task_id}." 
+ ) return if isinstance(activity_task, task.RetryableTask): if activity_task._retry_policy is not None: - next_delay = activity_task.compute_next_delay() - if next_delay is None: + # Check for non-retryable errors by type name + if task.is_error_non_retryable( + event.taskFailed.failureDetails.errorType, activity_task._retry_policy + ): activity_task.fail( f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", - event.taskFailed.failureDetails) + event.taskFailed.failureDetails, + ) ctx.resume() else: - activity_task.increment_attempt_count() - ctx.create_timer_internal(next_delay, activity_task) + next_delay = activity_task.compute_next_delay() + if next_delay is None: + activity_task.fail( + f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", + event.taskFailed.failureDetails, + ) + ctx.resume() + else: + activity_task.increment_attempt_count() + ctx.create_timer_internal(next_delay, activity_task) elif isinstance(activity_task, task.CompletableTask): activity_task.fail( f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}", - event.taskFailed.failureDetails) + event.taskFailed.failureDetails, + ) ctx.resume() else: raise TypeError("Unexpected task type") @@ -647,16 +1438,21 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven task_id = event.eventId action = ctx._pending_actions.pop(task_id, None) if not action: - raise _get_non_determinism_error(task_id, task.get_name(ctx.call_sub_orchestrator)) + raise _get_non_determinism_error( + task_id, task.get_name(ctx.call_sub_orchestrator) + ) elif not action.HasField("createSubOrchestration"): expected_method_name = task.get_name(ctx.call_sub_orchestrator) raise _get_wrong_action_type_error(task_id, expected_method_name, action) - elif action.createSubOrchestration.name != event.subOrchestrationInstanceCreated.name: + elif ( + action.createSubOrchestration.name != event.subOrchestrationInstanceCreated.name + ): raise _get_wrong_action_name_error( task_id, method_name=task.get_name(ctx.call_sub_orchestrator), expected_task_name=event.subOrchestrationInstanceCreated.name, - actual_task_name=action.createSubOrchestration.name) + actual_task_name=action.createSubOrchestration.name, + ) elif event.HasField("subOrchestrationInstanceCompleted"): task_id = event.subOrchestrationInstanceCompleted.taskScheduledId sub_orch_task = ctx._pending_tasks.pop(task_id, None) @@ -664,7 +1460,8 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceCompleted event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceCompleted event with ID = {task_id}." + ) return result = None if not ph.is_empty(event.subOrchestrationInstanceCompleted.result): @@ -679,23 +1476,36 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven # TODO: Should this be an error? When would it ever happen? if not ctx.is_replaying: self._logger.warning( - f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceFailed event with ID = {task_id}.") + f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceFailed event with ID = {task_id}." 
+ ) return if isinstance(sub_orch_task, task.RetryableTask): if sub_orch_task._retry_policy is not None: - next_delay = sub_orch_task.compute_next_delay() - if next_delay is None: + # Check for non-retryable errors by type name + if task.is_error_non_retryable( + failedEvent.failureDetails.errorType, sub_orch_task._retry_policy + ): sub_orch_task.fail( f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", - failedEvent.failureDetails) + failedEvent.failureDetails, + ) ctx.resume() else: - sub_orch_task.increment_attempt_count() - ctx.create_timer_internal(next_delay, sub_orch_task) + next_delay = sub_orch_task.compute_next_delay() + if next_delay is None: + sub_orch_task.fail( + f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", + failedEvent.failureDetails, + ) + ctx.resume() + else: + sub_orch_task.increment_attempt_count() + ctx.create_timer_internal(next_delay, sub_orch_task) elif isinstance(sub_orch_task, task.CompletableTask): sub_orch_task.fail( f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}", - failedEvent.failureDetails) + failedEvent.failureDetails, + ) ctx.resume() else: raise TypeError("Unexpected sub-orchestration task type") @@ -724,7 +1534,9 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven decoded_result = shared.from_json(event.eventRaised.input.value) event_list.append(decoded_result) if not ctx.is_replaying: - self._logger.info(f"{ctx.instance_id}: Event '{event_name}' has been buffered as there are no tasks waiting for it.") + self._logger.info( + f"{ctx.instance_id}: Event '{event_name}' has been buffered as there are no tasks waiting for it." + ) elif event.HasField("executionSuspended"): if not self._is_suspended and not ctx.is_replaying: self._logger.info(f"{ctx.instance_id}: Execution suspended.") @@ -739,11 +1551,24 @@ def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEven elif event.HasField("executionTerminated"): if not ctx.is_replaying: self._logger.info(f"{ctx.instance_id}: Execution terminating.") - encoded_output = event.executionTerminated.input.value if not ph.is_empty(event.executionTerminated.input) else None - ctx.set_complete(encoded_output, pb.ORCHESTRATION_STATUS_TERMINATED, is_result_encoded=True) + encoded_output = ( + event.executionTerminated.input.value + if not ph.is_empty(event.executionTerminated.input) + else None + ) + ctx.set_complete( + encoded_output, + pb.ORCHESTRATION_STATUS_TERMINATED, + is_result_encoded=True, + ) + elif event.HasField("executionStalled"): + # Nothing to do + pass else: eventType = event.WhichOneof("eventType") - raise task.OrchestrationStateError(f"Don't know how to handle event of type '{eventType}'") + raise task.OrchestrationStateError( + f"Don't know how to handle event of type '{eventType}'" + ) except StopIteration as generatorStopped: # The orchestrator generator function completed ctx.set_complete(generatorStopped.value, pb.ORCHESTRATION_STATUS_COMPLETED) @@ -754,12 +1579,20 @@ def __init__(self, registry: _Registry, logger: logging.Logger): self._registry = registry self._logger = logger - def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input: Optional[str]) -> Optional[str]: + def execute( + self, + orchestration_id: str, + name: str, + task_id: int, + encoded_input: Optional[str], + ) -> Optional[str]: """Executes an activity function and returns the serialized result, if any.""" 
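The taskFailed and subOrchestrationInstanceFailed handling above short-circuits retries when the failure's `errorType` is marked non-retryable via `task.is_error_non_retryable`. The sketch below assumes the policy exposes this list through a `non_retryable_error_types` parameter; that parameter name is not confirmed by this diff:

```python
from datetime import timedelta

from durabletask import task

policy = task.RetryPolicy(
    first_retry_interval=timedelta(seconds=1),
    max_number_of_attempts=3,
    backoff_coefficient=2,
    max_retry_interval=timedelta(seconds=30),
    retry_timeout=timedelta(minutes=5),
    # Assumed parameter name: failures with a matching errorType fail
    # immediately instead of scheduling a retry timer.
    non_retryable_error_types=["ValueError"],
)


def orchestrator(ctx: task.OrchestrationContext, _):
    result = yield ctx.call_activity("flaky_activity", retry_policy=policy)
    return result
```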
self._logger.debug(f"{orchestration_id}/{task_id}: Executing activity '{name}'...") fn = self._registry.get_activity(name) if not fn: - raise ActivityNotRegisteredError(f"Activity function named '{name}' was not registered!") + raise ActivityNotRegisteredError( + f"Activity function named '{name}' was not registered!" + ) activity_input = shared.from_json(encoded_input) if encoded_input else None ctx = task.ActivityContext(orchestration_id, task_id) @@ -770,7 +1603,8 @@ def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input: encoded_output = shared.to_json(activity_output) if activity_output is not None else None chars = len(encoded_output) if encoded_output else 0 self._logger.debug( - f"{orchestration_id}/{task_id}: Activity '{name}' completed successfully with {chars} char(s) of encoded output.") + f"{orchestration_id}/{task_id}: Activity '{name}' completed successfully with {chars} char(s) of encoded output." + ) return encoded_output @@ -779,37 +1613,37 @@ def _get_non_determinism_error(task_id: int, action_name: str) -> task.NonDeterm f"A previous execution called {action_name} with ID={task_id}, but the current " f"execution doesn't have this action with this ID. This problem occurs when either " f"the orchestration has non-deterministic logic or if the code was changed after an " - f"instance of this orchestration already started running.") + f"instance of this orchestration already started running." + ) def _get_wrong_action_type_error( - task_id: int, - expected_method_name: str, - action: pb.OrchestratorAction) -> task.NonDeterminismError: + task_id: int, expected_method_name: str, action: pb.OrchestratorAction +) -> task.NonDeterminismError: unexpected_method_name = _get_method_name_for_action(action) return task.NonDeterminismError( f"Failed to restore orchestration state due to a history mismatch: A previous execution called " f"{expected_method_name} with ID={task_id}, but the current execution is instead trying to call " f"{unexpected_method_name} as part of rebuilding it's history. This kind of mismatch can happen if an " f"orchestration has non-deterministic logic or if the code was changed after an instance of this " - f"orchestration already started running.") + f"orchestration already started running." + ) def _get_wrong_action_name_error( - task_id: int, - method_name: str, - expected_task_name: str, - actual_task_name: str) -> task.NonDeterminismError: + task_id: int, method_name: str, expected_task_name: str, actual_task_name: str +) -> task.NonDeterminismError: return task.NonDeterminismError( f"Failed to restore orchestration state due to a history mismatch: A previous execution called " f"{method_name} with name='{expected_task_name}' and sequence number {task_id}, but the current " f"execution is instead trying to call {actual_task_name} as part of rebuilding it's history. " f"This kind of mismatch can happen if an orchestration has non-deterministic logic or if the code " - f"was changed after an instance of this orchestration already started running.") + f"was changed after an instance of this orchestration already started running." 
+ ) def _get_method_name_for_action(action: pb.OrchestratorAction) -> str: - action_type = action.WhichOneof('orchestratorActionType') + action_type = action.WhichOneof("orchestratorActionType") if action_type == "scheduleTask": return task.get_name(task.OrchestrationContext.call_activity) elif action_type == "createTimer": @@ -829,9 +1663,9 @@ def _get_new_event_summary(new_events: Sequence[pb.HistoryEvent]) -> str: elif len(new_events) == 1: return f"[{new_events[0].WhichOneof('eventType')}]" else: - counts: Dict[str, int] = {} + counts: dict[str, int] = {} for event in new_events: - event_type = event.WhichOneof('eventType') + event_type = event.WhichOneof("eventType") counts[event_type] = counts.get(event_type, 0) + 1 return f"[{', '.join(f'{name}={count}' for name, count in counts.items())}]" @@ -843,13 +1677,231 @@ def _get_action_summary(new_actions: Sequence[pb.OrchestratorAction]) -> str: elif len(new_actions) == 1: return f"[{new_actions[0].WhichOneof('orchestratorActionType')}]" else: - counts: Dict[str, int] = {} + counts: dict[str, int] = {} for action in new_actions: - action_type = action.WhichOneof('orchestratorActionType') + action_type = action.WhichOneof("orchestratorActionType") counts[action_type] = counts.get(action_type, 0) + 1 return f"[{', '.join(f'{name}={count}' for name, count in counts.items())}]" def _is_suspendable(event: pb.HistoryEvent) -> bool: """Returns true if the event is one that can be suspended and resumed.""" - return event.WhichOneof("eventType") not in ["executionResumed", "executionTerminated"] + return event.WhichOneof("eventType") not in [ + "executionResumed", + "executionTerminated", + ] + + +class _AsyncWorkerManager: + def __init__(self, concurrency_options: ConcurrencyOptions): + self.concurrency_options = concurrency_options + self.activity_semaphore = None + self.orchestration_semaphore = None + # Don't create queues here - defer until we have an event loop + self.activity_queue: Optional[asyncio.Queue] = None + self.orchestration_queue: Optional[asyncio.Queue] = None + self._queue_event_loop: Optional[asyncio.AbstractEventLoop] = None + # Store work items when no event loop is available + self._pending_activity_work: list = [] + self._pending_orchestration_work: list = [] + self.thread_pool = ThreadPoolExecutor( + max_workers=concurrency_options.maximum_thread_pool_workers, + thread_name_prefix="DurableTask", + ) + self._shutdown = False + + def _ensure_queues_for_current_loop(self): + """Ensure queues are bound to the current event loop.""" + try: + current_loop = asyncio.get_running_loop() + if current_loop.is_closed(): + return + except RuntimeError: + # No event loop running, can't create queues + return + + # Check if queues are already properly set up for current loop + if self._queue_event_loop is current_loop: + if self.activity_queue is not None and self.orchestration_queue is not None: + # Queues are already bound to the current loop and exist + return + + # Need to recreate queues for the current event loop + # First, preserve any existing work items + existing_activity_items = [] + existing_orchestration_items = [] + + if self.activity_queue is not None: + try: + while not self.activity_queue.empty(): + existing_activity_items.append(self.activity_queue.get_nowait()) + except Exception: + pass + + if self.orchestration_queue is not None: + try: + while not self.orchestration_queue.empty(): + existing_orchestration_items.append(self.orchestration_queue.get_nowait()) + except Exception: + pass + + # Create fresh queues for 
the current event loop + self.activity_queue = asyncio.Queue() + self.orchestration_queue = asyncio.Queue() + self._queue_event_loop = current_loop + + # Restore the work items to the new queues + for item in existing_activity_items: + self.activity_queue.put_nowait(item) + for item in existing_orchestration_items: + self.orchestration_queue.put_nowait(item) + + # Move pending work items to the queues + for item in self._pending_activity_work: + self.activity_queue.put_nowait(item) + for item in self._pending_orchestration_work: + self.orchestration_queue.put_nowait(item) + + # Clear the pending work lists + self._pending_activity_work.clear() + self._pending_orchestration_work.clear() + + async def run(self): + # Reset shutdown flag in case this manager is being reused + self._shutdown = False + + # Ensure queues are properly bound to the current event loop + self._ensure_queues_for_current_loop() + + # Create semaphores in the current event loop + self.activity_semaphore = asyncio.Semaphore( + self.concurrency_options.maximum_concurrent_activity_work_items + ) + self.orchestration_semaphore = asyncio.Semaphore( + self.concurrency_options.maximum_concurrent_orchestration_work_items + ) + + # Start background consumers for each work type + if self.activity_queue is not None and self.orchestration_queue is not None: + await asyncio.gather( + self._consume_queue(self.activity_queue, self.activity_semaphore), + self._consume_queue(self.orchestration_queue, self.orchestration_semaphore), + ) + + async def _consume_queue(self, queue: asyncio.Queue, semaphore: asyncio.Semaphore): + # List to track running tasks + running_tasks: set[asyncio.Task] = set() + + try: + while True: + # Clean up completed tasks + done_tasks = {task for task in running_tasks if task.done()} + running_tasks -= done_tasks + + # Exit if shutdown is set and the queue is empty and no tasks are running + if self._shutdown and queue.empty() and not running_tasks: + break + + try: + work = await asyncio.wait_for(queue.get(), timeout=1.0) + except asyncio.TimeoutError: + # Check for cancellation during timeout and exit while loop if shutting down + if self._shutdown: + break + continue # otherwise wait for work item to become available and loop again + except asyncio.CancelledError: + # Propagate cancellation + raise + + func, args, kwargs = work + # Create a concurrent task for processing + task = asyncio.create_task( + self._process_work_item(semaphore, queue, func, args, kwargs) + ) + running_tasks.add(task) + # handle the cancellation bubbled up from the loop + except asyncio.CancelledError: + # Cancel any remaining running tasks + for task in running_tasks: + if not task.done(): + task.cancel() + # Wait briefly for tasks to cancel, but don't block indefinitely + if running_tasks: + await asyncio.gather(*running_tasks, return_exceptions=True) + raise + + async def _process_work_item( + self, semaphore: asyncio.Semaphore, queue: asyncio.Queue, func, args, kwargs + ): + async with semaphore: + try: + await self._run_func(func, *args, **kwargs) + finally: + queue.task_done() + + async def _run_func(self, func, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return await func(*args, **kwargs) + else: + loop = asyncio.get_running_loop() + # Avoid submitting to executor after shutdown + if ( + getattr(self, "_shutdown", False) + and getattr(self, "thread_pool", None) + and getattr(self.thread_pool, "_shutdown", False) + ): + return None + result = await loop.run_in_executor(self.thread_pool, lambda: func(*args, 
**kwargs)) + return result + + def submit_activity(self, func, *args, **kwargs): + work_item = (func, args, kwargs) + self._ensure_queues_for_current_loop() + if self.activity_queue is not None: + self.activity_queue.put_nowait(work_item) + else: + # No event loop running, store in pending list + self._pending_activity_work.append(work_item) + + def submit_orchestration(self, func, *args, **kwargs): + work_item = (func, args, kwargs) + self._ensure_queues_for_current_loop() + if self.orchestration_queue is not None: + self.orchestration_queue.put_nowait(work_item) + else: + # No event loop running, store in pending list + self._pending_orchestration_work.append(work_item) + + def shutdown(self): + self._shutdown = True + # Shutdown thread pool. Since we've already cancelled worker_task and set _shutdown=True, + # no new work should be submitted and existing work should complete quickly. + # ThreadPoolExecutor.shutdown(wait=True) doesn't support a timeout, but with proper + # cancellation in place, threads should exit promptly, otherwise this will hang and block shutdown for the application. + self.thread_pool.shutdown(wait=True) + + def reset_for_new_run(self): + """Reset the manager state for a new run.""" + self._shutdown = False + # Clear any existing queues - they'll be recreated when needed + if self.activity_queue is not None: + # Clear existing queue by creating a new one + # This ensures no items from previous runs remain + try: + while not self.activity_queue.empty(): + self.activity_queue.get_nowait() + except Exception: + pass + if self.orchestration_queue is not None: + try: + while not self.orchestration_queue.empty(): + self.orchestration_queue.get_nowait() + except Exception: + pass + # Clear pending work lists + self._pending_activity_work.clear() + self._pending_orchestration_work.clear() + + +# Export public API +__all__ = ["ConcurrencyOptions", "TaskHubGrpcWorker"] diff --git a/examples/README.md b/examples/README.md index ec9088fb..a6cd8476 100644 --- a/examples/README.md +++ b/examples/README.md @@ -8,7 +8,11 @@ All the examples assume that you have a Durable Task-compatible sidecar running 1. Install the latest version of the [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/), which contains and exposes an embedded version of the Durable Task engine. The setup process (which requires Docker) will configure the workflow engine to store state in a local Redis container. -1. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. +2. Run the [Durable Task Sidecar](https://github.com/dapr/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. + ```sh + go install github.com/dapr/durabletask-go@main + durabletask-go --port 4001 + ``` ## Running the examples @@ -24,4 +28,4 @@ In some cases, the sample may require command-line parameters or user inputs. In - [Activity sequence](./activity_sequence.py): Orchestration that schedules three activity calls in a sequence. - [Fan-out/fan-in](./fanout_fanin.py): Orchestration that schedules a dynamic number of activity calls in parallel, waits for all of them to complete, and then performs an aggregation on the results. -- [Human interaction](./human_interaction.py): Orchestration that waits for a human to approve an order before continuing. 
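To close out the worker changes: the `ConcurrencyOptions` exported above caps in-flight orchestrations and activities (the limits are also advertised to the sidecar in `GetWorkItemsRequest`) and sizes the shared thread pool. A usage sketch; the attribute names come from this diff, but passing the options through a `concurrency_options` keyword on `TaskHubGrpcWorker` is an assumption:

```python
from durabletask import worker

options = worker.ConcurrencyOptions(
    maximum_concurrent_activity_work_items=100,
    maximum_concurrent_orchestration_work_items=20,
    maximum_thread_pool_workers=32,
)

# Keyword name is assumed, not confirmed by this diff.
w = worker.TaskHubGrpcWorker(
    host_address="localhost:4001",
    concurrency_options=options,
)
```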
\ No newline at end of file +- [Human interaction](./human_interaction.py): Orchestration that waits for a human to approve an order before continuing. diff --git a/examples/activity_sequence.py b/examples/activity_sequence.py index 066a733a..fa88363d 100644 --- a/examples/activity_sequence.py +++ b/examples/activity_sequence.py @@ -1,19 +1,20 @@ """End-to-end sample that demonstrates how to configure an orchestrator that calls an activity function in a sequence and prints the outputs.""" + from durabletask import client, task, worker def hello(ctx: task.ActivityContext, name: str) -> str: """Activity function that returns a greeting""" - return f'Hello {name}!' + return f"Hello {name}!" def sequence(ctx: task.OrchestrationContext, _): """Orchestrator function that calls the 'hello' activity function in a sequence""" # call "hello" activity function in a sequence - result1 = yield ctx.call_activity(hello, input='Tokyo') - result2 = yield ctx.call_activity(hello, input='Seattle') - result3 = yield ctx.call_activity(hello, input='London') + result1 = yield ctx.call_activity(hello, input="Tokyo") + result2 = yield ctx.call_activity(hello, input="Seattle") + result3 = yield ctx.call_activity(hello, input="London") # return an array of results return [result1, result2, result3] @@ -30,6 +31,6 @@ def sequence(ctx: task.OrchestrationContext, _): instance_id = c.schedule_new_orchestration(sequence) state = c.wait_for_orchestration_completion(instance_id, timeout=10) if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: - print(f'Orchestration completed! Result: {state.serialized_output}') + print(f"Orchestration completed! Result: {state.serialized_output}") elif state: - print(f'Orchestration failed: {state.failure_details}') + print(f"Orchestration failed: {state.failure_details}") diff --git a/examples/fanout_fanin.py b/examples/fanout_fanin.py index 3e054dfa..30339b74 100644 --- a/examples/fanout_fanin.py +++ b/examples/fanout_fanin.py @@ -1,24 +1,24 @@ """End-to-end sample that demonstrates how to configure an orchestrator that a dynamic number activity functions in parallel, waits for them all to complete, and prints an aggregate summary of the outputs.""" + import random import time -from typing import List from durabletask import client, task, worker -def get_work_items(ctx: task.ActivityContext, _) -> List[str]: +def get_work_items(ctx: task.ActivityContext, _) -> list[str]: """Activity function that returns a list of work items""" # return a random number of work items count = random.randint(2, 10) - print(f'generating {count} work items...') - return [f'work item {i}' for i in range(count)] + print(f"generating {count} work items...") + return [f"work item {i}" for i in range(count)] def process_work_item(ctx: task.ActivityContext, item: str) -> int: """Activity function that returns a result for a given work item""" - print(f'processing work item: {item}') + print(f"processing work item: {item}") # simulate some work that takes a variable amount of time time.sleep(random.random() * 5) @@ -32,17 +32,17 @@ def orchestrator(ctx: task.OrchestrationContext, _): activity functions in parallel, waits for them all to complete, and prints an aggregate summary of the outputs""" - work_items: List[str] = yield ctx.call_activity(get_work_items) + work_items: list[str] = yield ctx.call_activity(get_work_items) # execute the work-items in parallel and wait for them all to return tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items] - results: 
List[int] = yield task.when_all(tasks) + results: list[int] = yield task.when_all(tasks) # return an aggregate summary of the results return { - 'work_items': work_items, - 'results': results, - 'total': sum(results), + "work_items": work_items, + "results": results, + "total": sum(results), } @@ -58,6 +58,6 @@ def orchestrator(ctx: task.OrchestrationContext, _): instance_id = c.schedule_new_orchestration(orchestrator) state = c.wait_for_orchestration_completion(instance_id, timeout=30) if state and state.runtime_status == client.OrchestrationStatus.COMPLETED: - print(f'Orchestration completed! Result: {state.serialized_output}') + print(f"Orchestration completed! Result: {state.serialized_output}") elif state: - print(f'Orchestration failed: {state.failure_details}') + print(f"Orchestration failed: {state.failure_details}") diff --git a/examples/human_interaction.py b/examples/human_interaction.py index 2a01897c..97730556 100644 --- a/examples/human_interaction.py +++ b/examples/human_interaction.py @@ -15,23 +15,24 @@ @dataclass class Order: """Represents a purchase order""" + Cost: float Product: str Quantity: int def __str__(self): - return f'{self.Product} ({self.Quantity})' + return f"{self.Product} ({self.Quantity})" def send_approval_request(_: task.ActivityContext, order: Order) -> None: """Activity function that sends an approval request to the manager""" time.sleep(5) - print(f'*** Sending approval request for order: {order}') + print(f"*** Sending approval request for order: {order}") def place_order(_: task.ActivityContext, order: Order) -> None: """Activity function that places an order""" - print(f'*** Placing order: {order}') + print(f"*** Placing order: {order}") def purchase_order_workflow(ctx: task.OrchestrationContext, order: Order): @@ -92,7 +93,7 @@ def prompt_for_approval(): if not state: print("Workflow not found!") # not expected elif state.runtime_status == client.OrchestrationStatus.COMPLETED: - print(f'Orchestration completed! Result: {state.serialized_output}') + print(f"Orchestration completed! 
Result: {state.serialized_output}") else: state.raise_if_failed() # raises an exception except TimeoutError: diff --git a/pyproject.toml b/pyproject.toml index d57957d9..6626bc21 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,12 +4,12 @@ # For more information on pyproject.toml, see https://peps.python.org/pep-0621/ [build-system] -requires = ["setuptools", "wheel"] +requires = ["setuptools", "wheel", "setuptools_scm"] build-backend = "setuptools.build_meta" [project] -name = "durabletask" -version = "0.1.1-alpha.1" +name = "durabletask-dapr" +dynamic = ["version"] description = "A Durable Task Client SDK for Python" keywords = [ "durable", @@ -17,27 +17,68 @@ keywords = [ "workflow" ] classifiers = [ - "Development Status :: 3 - Alpha", - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", + "Development Status :: 3 - Alpha", + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", ] -requires-python = ">=3.8" +requires-python = ">=3.10" license = {file = "LICENSE"} readme = "README.md" dependencies = [ "grpcio", + "protobuf>=6.31.1,<7.0.0", # follows grpcio generation version https://github.com/grpc/grpc/blob/v1.75.1/tools/distrib/python/grpcio_tools/setup.py + "asyncio" ] [project.urls] -repository = "https://github.com/microsoft/durabletask-python" -changelog = "https://github.com/microsoft/durabletask-python/blob/main/CHANGELOG.md" +repository = "https://github.com/dapr/durabletask-python" +changelog = "https://github.com/dapr/durabletask-python/blob/main/CHANGELOG.md" [tool.setuptools.packages.find] include = ["durabletask", "durabletask.*"] +[tool.setuptools_scm] +version_scheme = "guess-next-dev" +local_scheme = "no-local-version" + [tool.pytest.ini_options] minversion = "6.0" testpaths = ["tests"] +pythonpath = ["."] markers = [ "e2e: mark a test as an end-to-end test that requires a running sidecar" ] + +[project.optional-dependencies] +dev = [ + "pytest", + "pytest-asyncio>=0.23", + "tox>=4.0.0", + "pytest-cov", + "ruff", + + # grpc gen + "grpcio-tools==1.75.1", +] + +[tool.ruff] +target-version = "py310" +line-length = 100 +extend-exclude = [".github", "durabletask/internal/orchestrator_service_*.*"] + +[tool.ruff.lint] +select = [ + "I", # isort + "W", # pycodestyle warnings + "F", # pyflakes + + # TODO: Add those back progressively as we fix the issues + # "E", # pycodestyle errors + # "C", # flake8-comprehensions + # "B", # flake8-bugbear + # "UP", # pyupgrade +] + +[tool.ruff.format] +# follow upstream quote-style instead of dapr/python-sdk to reduce diff +quote-style = "double" diff --git a/requirements.txt b/requirements.txt index 641cee7c..b6902e93 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1 @@ -autopep8 -grpcio -grpcio-tools -pytest -pytest-cov \ No newline at end of file +# pyproject.toml has the dependencies for this project \ No newline at end of file diff --git a/submodules/durabletask-protobuf b/submodules/durabletask-protobuf deleted file mode 160000 index c7d8cd89..00000000 --- a/submodules/durabletask-protobuf +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c7d8cd898017342d090ba9531c3f2ec45b8e07e7 diff --git a/tests/test_activity_executor.py b/tests/durabletask/test_activity_executor.py similarity index 86% rename from tests/test_activity_executor.py rename to tests/durabletask/test_activity_executor.py index b9a4bd49..996ae440 100644 --- a/tests/test_activity_executor.py +++ b/tests/durabletask/test_activity_executor.py @@ -3,21 +3,23 @@ import json import logging -from typing 
import Any, Tuple, Union +from typing import Any, Optional, Tuple from durabletask import task, worker logging.basicConfig( - format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - level=logging.DEBUG) + format="%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + level=logging.DEBUG, +) TEST_LOGGER = logging.getLogger("tests") -TEST_INSTANCE_ID = 'abc123' +TEST_INSTANCE_ID = "abc123" TEST_TASK_ID = 42 def test_activity_inputs(): """Validates activity function input population""" + def test_activity(ctx: task.ActivityContext, test_input: Any): # return all activity inputs back as the output return test_input, ctx.orchestration_id, ctx.task_id @@ -34,13 +36,12 @@ def test_activity(ctx: task.ActivityContext, test_input: Any): def test_activity_not_registered(): - def test_activity(ctx: task.ActivityContext, _): pass # not used executor, _ = _get_activity_executor(test_activity) - caught_exception: Union[Exception, None] = None + caught_exception: Optional[Exception] = None try: executor.execute(TEST_INSTANCE_ID, "Bogus", TEST_TASK_ID, None) except Exception as ex: diff --git a/tests/durabletask/test_client.py b/tests/durabletask/test_client.py new file mode 100644 index 00000000..c74ba176 --- /dev/null +++ b/tests/durabletask/test_client.py @@ -0,0 +1,167 @@ +from unittest.mock import MagicMock, patch + +from durabletask import client +from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl +from durabletask.internal.shared import get_default_host_address, get_grpc_channel + +HOST_ADDRESS = "localhost:50051" +METADATA = [("key1", "value1"), ("key2", "value2")] +INTERCEPTORS = [DefaultClientInterceptorImpl(METADATA)] + + +def test_get_grpc_channel_insecure(): + with patch("grpc.insecure_channel") as mock_channel: + get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "options" in kwargs and kwargs["options"] is None + + +def test_get_grpc_channel_secure(): + with ( + patch("grpc.secure_channel") as mock_channel, + patch("grpc.ssl_channel_credentials") as mock_credentials, + ): + get_grpc_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert args[1] == mock_credentials.return_value + assert "options" in kwargs and kwargs["options"] is None + + +def test_get_grpc_channel_default_host_address(): + with patch("grpc.insecure_channel") as mock_channel: + get_grpc_channel(None, False, interceptors=INTERCEPTORS) + args, kwargs = mock_channel.call_args + assert args[0] == get_default_host_address() + assert "options" in kwargs and kwargs["options"] is None + + +def test_get_grpc_channel_with_metadata(): + with ( + patch("grpc.insecure_channel") as mock_channel, + patch("grpc.intercept_channel") as mock_intercept_channel, + ): + get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "options" in kwargs and kwargs["options"] is None + mock_intercept_channel.assert_called_once() + + # Capture and check the arguments passed to intercept_channel() + args, kwargs = mock_intercept_channel.call_args + assert args[0] == mock_channel.return_value + assert isinstance(args[1], DefaultClientInterceptorImpl) + assert args[1]._metadata == METADATA + + +def test_grpc_channel_with_host_name_protocol_stripping(): + with ( + 
patch("grpc.insecure_channel") as mock_insecure_channel, + patch("grpc.secure_channel") as mock_secure_channel, + ): + host_name = "myserver.com:1234" + + prefix = "grpc://" + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + prefix = "http://" + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + prefix = "HTTP://" + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + prefix = "GRPC://" + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + prefix = "" + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + prefix = "grpcs://" + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + prefix = "https://" + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + prefix = "HTTPS://" + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + prefix = "GRPCS://" + get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + prefix = "" + get_grpc_channel(prefix + host_name, True, interceptors=INTERCEPTORS) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert "options" in kwargs and kwargs["options"] is None + + +def test_sync_channel_passes_base_options_and_max_lengths(): + base_options = [ + ("grpc.max_send_message_length", 1234), + ("grpc.max_receive_message_length", 5678), + ("grpc.primary_user_agent", "durabletask-tests"), + ] + with patch("grpc.insecure_channel") as mock_channel: + get_grpc_channel(HOST_ADDRESS, False, options=base_options) + # Ensure called with options kwarg + assert mock_channel.call_count == 1 + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "options" in kwargs + opts = kwargs["options"] + # Check our base options made it through + assert ("grpc.max_send_message_length", 1234) in opts + assert ("grpc.max_receive_message_length", 5678) in opts + assert ("grpc.primary_user_agent", "durabletask-tests") in opts + + +def test_taskhub_client_close_handles_exceptions(): + """Test that close() handles exceptions gracefully (edge case not easily testable in E2E).""" + with patch("durabletask.internal.shared.get_grpc_channel") as mock_get_channel: + mock_channel = MagicMock() + mock_channel.close.side_effect = Exception("close failed") + mock_get_channel.return_value = mock_channel + 
+ task_hub_client = client.TaskHubGrpcClient() + # Should not raise exception + task_hub_client.close() + + +def test_taskhub_client_close_closes_channel_handles_exceptions(): + """Test that close() closes the channel and handles exceptions gracefully.""" + with patch("durabletask.internal.shared.get_grpc_channel") as mock_get_channel: + mock_channel = MagicMock() + mock_channel.close.side_effect = Exception("close failed") + mock_get_channel.return_value = mock_channel + + task_hub_client = client.TaskHubGrpcClient() + task_hub_client.close() + mock_channel.close.assert_called_once() diff --git a/tests/durabletask/test_client_async.py b/tests/durabletask/test_client_async.py new file mode 100644 index 00000000..43e88705 --- /dev/null +++ b/tests/durabletask/test_client_async.py @@ -0,0 +1,171 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. + +from unittest.mock import patch + +from durabletask.aio.client import AsyncTaskHubGrpcClient +from durabletask.aio.internal.grpc_interceptor import DefaultClientInterceptorImpl +from durabletask.aio.internal.shared import get_grpc_aio_channel +from durabletask.internal.shared import get_default_host_address + +HOST_ADDRESS = "localhost:50051" +METADATA = [("key1", "value1"), ("key2", "value2")] +INTERCEPTORS_AIO = [DefaultClientInterceptorImpl(METADATA)] + + +def test_get_grpc_aio_channel_insecure(): + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: + get_grpc_aio_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + +def test_get_grpc_aio_channel_secure(): + with ( + patch("durabletask.aio.internal.shared.grpc_aio.secure_channel") as mock_channel, + patch("grpc.ssl_channel_credentials") as mock_credentials, + ): + get_grpc_aio_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert args[1] == mock_credentials.return_value + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + +def test_get_grpc_aio_channel_default_host_address(): + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: + get_grpc_aio_channel(None, False, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_channel.call_args + assert args[0] == get_default_host_address() + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + +def test_get_grpc_aio_channel_with_interceptors(): + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: + get_grpc_aio_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + # Capture and check the arguments passed to insecure_channel() + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "interceptors" in kwargs + interceptors = kwargs["interceptors"] + assert isinstance(interceptors[0], DefaultClientInterceptorImpl) + assert interceptors[0]._metadata == METADATA + + +def test_grpc_aio_channel_with_host_name_protocol_stripping(): + with ( + 
patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_insecure_channel, + patch("durabletask.aio.internal.shared.grpc_aio.secure_channel") as mock_secure_channel, + ): + host_name = "myserver.com:1234" + + prefix = "grpc://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + prefix = "http://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + prefix = "HTTP://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + prefix = "GRPC://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + prefix = "" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_insecure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + prefix = "grpcs://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + prefix = "https://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + prefix = "HTTPS://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + prefix = "GRPCS://" + get_grpc_aio_channel(prefix + host_name, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + prefix = "" + get_grpc_aio_channel(prefix + host_name, True, interceptors=INTERCEPTORS_AIO) + args, kwargs = mock_secure_channel.call_args + assert args[0] == host_name + assert kwargs.get("interceptors") == INTERCEPTORS_AIO + assert "options" in kwargs and kwargs["options"] is None + + +def test_async_client_construct_with_metadata(): + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: + AsyncTaskHubGrpcClient(host_address=HOST_ADDRESS, metadata=METADATA) + # Ensure channel created with an interceptor that has the expected metadata + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "interceptors" in kwargs + interceptors = 
kwargs["interceptors"] + assert isinstance(interceptors[0], DefaultClientInterceptorImpl) + assert interceptors[0]._metadata == METADATA + + +def test_aio_channel_passes_base_options_and_max_lengths(): + base_options = [ + ("grpc.max_send_message_length", 4321), + ("grpc.max_receive_message_length", 8765), + ("grpc.primary_user_agent", "durabletask-aio-tests"), + ] + with patch("durabletask.aio.internal.shared.grpc_aio.insecure_channel") as mock_channel: + get_grpc_aio_channel(HOST_ADDRESS, False, options=base_options) + # Ensure called with options kwarg + assert mock_channel.call_count == 1 + args, kwargs = mock_channel.call_args + assert args[0] == HOST_ADDRESS + assert "options" in kwargs + opts = kwargs["options"] + # Check our base options made it through + assert ("grpc.max_send_message_length", 4321) in opts + assert ("grpc.max_receive_message_length", 8765) in opts + assert ("grpc.primary_user_agent", "durabletask-aio-tests") in opts diff --git a/tests/durabletask/test_concurrency_options.py b/tests/durabletask/test_concurrency_options.py new file mode 100644 index 00000000..a923383b --- /dev/null +++ b/tests/durabletask/test_concurrency_options.py @@ -0,0 +1,92 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import os + +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker + + +def test_default_concurrency_options(): + """Test that default concurrency options work correctly.""" + options = ConcurrencyOptions() + processor_count = os.cpu_count() or 1 + expected_default = 100 * processor_count + expected_workers = processor_count + 4 + + assert options.maximum_concurrent_activity_work_items == expected_default + assert options.maximum_concurrent_orchestration_work_items == expected_default + assert options.maximum_thread_pool_workers == expected_workers + + +def test_custom_concurrency_options(): + """Test that custom concurrency options work correctly.""" + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=50, + maximum_concurrent_orchestration_work_items=25, + maximum_thread_pool_workers=30, + ) + + assert options.maximum_concurrent_activity_work_items == 50 + assert options.maximum_concurrent_orchestration_work_items == 25 + assert options.maximum_thread_pool_workers == 30 + + +def test_partial_custom_options(): + """Test that partially specified options use defaults for unspecified values.""" + processor_count = os.cpu_count() or 1 + expected_default = 100 * processor_count + expected_workers = processor_count + 4 + + options = ConcurrencyOptions(maximum_concurrent_activity_work_items=30) + + assert options.maximum_concurrent_activity_work_items == 30 + assert options.maximum_concurrent_orchestration_work_items == expected_default + assert options.maximum_thread_pool_workers == expected_workers + + +def test_worker_with_concurrency_options(): + """Test that TaskHubGrpcWorker accepts concurrency options.""" + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=10, + maximum_concurrent_orchestration_work_items=20, + maximum_thread_pool_workers=15, + ) + + worker = TaskHubGrpcWorker(concurrency_options=options) + + assert worker.concurrency_options == options + + +def test_worker_default_options(): + """Test that TaskHubGrpcWorker uses default options when no parameters are provided.""" + worker = TaskHubGrpcWorker() + + processor_count = os.cpu_count() or 1 + expected_default = 100 * processor_count + expected_workers = processor_count + 4 + + assert 
worker.concurrency_options.maximum_concurrent_activity_work_items == expected_default + assert ( + worker.concurrency_options.maximum_concurrent_orchestration_work_items == expected_default + ) + assert worker.concurrency_options.maximum_thread_pool_workers == expected_workers + + +def test_concurrency_options_property_access(): + """Test that the concurrency_options property works correctly.""" + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=15, + maximum_concurrent_orchestration_work_items=25, + maximum_thread_pool_workers=30, + ) + + worker = TaskHubGrpcWorker(concurrency_options=options) + retrieved_options = worker.concurrency_options + + # Should be the same object + assert retrieved_options is options + + # Should have correct values + assert retrieved_options.maximum_concurrent_activity_work_items == 15 + assert retrieved_options.maximum_concurrent_orchestration_work_items == 25 + assert retrieved_options.maximum_thread_pool_workers == 30 diff --git a/tests/durabletask/test_deterministic.py b/tests/durabletask/test_deterministic.py new file mode 100644 index 00000000..f8f3acf1 --- /dev/null +++ b/tests/durabletask/test_deterministic.py @@ -0,0 +1,455 @@ +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import random +import uuid +from datetime import datetime, timezone + +import pytest + +from durabletask.deterministic import ( + DeterminismSeed, + derive_seed, + deterministic_random, + deterministic_uuid4, + deterministic_uuid_v5, +) +from durabletask.worker import _RuntimeOrchestrationContext + + +class TestDeterminismSeed: + """Test DeterminismSeed dataclass and its methods.""" + + def test_to_int_produces_consistent_result(self): + """Test that to_int produces the same result for same inputs.""" + seed1 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + seed2 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + assert seed1.to_int() == seed2.to_int() + + def test_to_int_produces_different_results_for_different_instance_ids(self): + """Test that different instance IDs produce different seeds.""" + seed1 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + seed2 = DeterminismSeed(instance_id="test-456", orchestration_unix_ts=1234567890) + assert seed1.to_int() != seed2.to_int() + + def test_to_int_produces_different_results_for_different_timestamps(self): + """Test that different timestamps produce different seeds.""" + seed1 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + seed2 = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567891) + assert seed1.to_int() != seed2.to_int() + + def test_to_int_returns_positive_integer(self): + """Test that to_int returns a positive integer.""" + seed = DeterminismSeed(instance_id="test-123", orchestration_unix_ts=1234567890) + result = seed.to_int() + assert isinstance(result, int) + assert result >= 0 + + +class TestDeriveSeed: + """Test derive_seed function.""" + + def 
test_derive_seed_is_deterministic(self): + """Test that derive_seed produces consistent results.""" + instance_id = "test-instance" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + seed1 = derive_seed(instance_id, dt) + seed2 = derive_seed(instance_id, dt) + assert seed1 == seed2 + + def test_derive_seed_different_for_different_times(self): + """Test that different times produce different seeds.""" + instance_id = "test-instance" + dt1 = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + dt2 = datetime(2025, 1, 1, 12, 0, 1, tzinfo=timezone.utc) + seed1 = derive_seed(instance_id, dt1) + seed2 = derive_seed(instance_id, dt2) + assert seed1 != seed2 + + def test_derive_seed_handles_timezone_aware_datetime(self): + """Test that derive_seed works with timezone-aware datetimes.""" + instance_id = "test-instance" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + seed = derive_seed(instance_id, dt) + assert isinstance(seed, int) + + +class TestDeterministicRandom: + """Test deterministic_random function.""" + + def test_deterministic_random_returns_random_object(self): + """Test that deterministic_random returns a Random instance.""" + instance_id = "test-instance" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + rnd = deterministic_random(instance_id, dt) + assert isinstance(rnd, random.Random) + + def test_deterministic_random_produces_same_sequence(self): + """Test that same inputs produce same random sequence.""" + instance_id = "test-instance" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + rnd1 = deterministic_random(instance_id, dt) + rnd2 = deterministic_random(instance_id, dt) + + sequence1 = [rnd1.random() for _ in range(10)] + sequence2 = [rnd2.random() for _ in range(10)] + assert sequence1 == sequence2 + + def test_deterministic_random_different_for_different_inputs(self): + """Test that different inputs produce different sequences.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + rnd1 = deterministic_random("instance-1", dt) + rnd2 = deterministic_random("instance-2", dt) + + val1 = rnd1.random() + val2 = rnd2.random() + assert val1 != val2 + + +class TestDeterministicUuid4: + """Test deterministic_uuid4 function.""" + + def test_deterministic_uuid4_returns_valid_uuid(self): + """Test that deterministic_uuid4 returns a valid UUID4.""" + rnd = random.Random(42) + result = deterministic_uuid4(rnd) + assert isinstance(result, uuid.UUID) + assert result.version == 4 + + def test_deterministic_uuid4_is_deterministic(self): + """Test that same random state produces same UUID.""" + rnd1 = random.Random(42) + rnd2 = random.Random(42) + uuid1 = deterministic_uuid4(rnd1) + uuid2 = deterministic_uuid4(rnd2) + assert uuid1 == uuid2 + + def test_deterministic_uuid4_different_for_different_seeds(self): + """Test that different seeds produce different UUIDs.""" + rnd1 = random.Random(42) + rnd2 = random.Random(43) + uuid1 = deterministic_uuid4(rnd1) + uuid2 = deterministic_uuid4(rnd2) + assert uuid1 != uuid2 + + +class TestDeterministicUuidV5: + """Test deterministic_uuid_v5 function (matching .NET implementation).""" + + def test_deterministic_uuid_v5_returns_valid_uuid(self): + """Test that deterministic_uuid_v5 returns a valid UUID v5.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + result = deterministic_uuid_v5("test-instance", dt, 0) + assert isinstance(result, uuid.UUID) + assert result.version == 5 + + def test_deterministic_uuid_v5_is_deterministic(self): + """Test that same inputs produce same UUID.""" + dt 
= datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + uuid1 = deterministic_uuid_v5("test-instance", dt, 0) + uuid2 = deterministic_uuid_v5("test-instance", dt, 0) + assert uuid1 == uuid2 + + def test_deterministic_uuid_v5_different_for_different_counters(self): + """Test that different counters produce different UUIDs.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + uuid1 = deterministic_uuid_v5("test-instance", dt, 0) + uuid2 = deterministic_uuid_v5("test-instance", dt, 1) + assert uuid1 != uuid2 + + def test_deterministic_uuid_v5_different_for_different_instance_ids(self): + """Test that different instance IDs produce different UUIDs.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + uuid1 = deterministic_uuid_v5("instance-1", dt, 0) + uuid2 = deterministic_uuid_v5("instance-2", dt, 0) + assert uuid1 != uuid2 + + def test_deterministic_uuid_v5_different_for_different_datetimes(self): + """Test that different datetimes produce different UUIDs.""" + dt1 = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + dt2 = datetime(2025, 1, 1, 12, 0, 1, tzinfo=timezone.utc) + uuid1 = deterministic_uuid_v5("test-instance", dt1, 0) + uuid2 = deterministic_uuid_v5("test-instance", dt2, 0) + assert uuid1 != uuid2 + + def test_deterministic_uuid_v5_matches_expected_format(self): + """Test that UUID v5 uses the correct namespace.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + result = deterministic_uuid_v5("test-instance", dt, 0) + # Should be deterministic - same inputs always produce same output + expected = deterministic_uuid_v5("test-instance", dt, 0) + assert result == expected + + def test_deterministic_uuid_v5_counter_sequence(self): + """Test that incrementing counter produces different UUIDs in sequence.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + uuids = [deterministic_uuid_v5("test-instance", dt, i) for i in range(5)] + # All should be different + assert len(set(uuids)) == 5 + # But calling with same counter should produce same UUID + assert uuids[0] == deterministic_uuid_v5("test-instance", dt, 0) + assert uuids[4] == deterministic_uuid_v5("test-instance", dt, 4) + + +def mock_deterministic_context( + instance_id: str, current_utc_datetime: datetime +) -> _RuntimeOrchestrationContext: + """Mock context for testing DeterministicContextMixin.""" + ctx = _RuntimeOrchestrationContext(instance_id) + ctx.current_utc_datetime = current_utc_datetime + return ctx + + +class TestDeterministicContextMixin: + """Test DeterministicContextMixin methods.""" + + def test_now_returns_current_utc_datetime(self): + """Test that now() returns the orchestration time.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + assert ctx.now() == dt + + def test_random_returns_deterministic_prng(self): + """Test that random() returns a deterministic PRNG.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + rnd1 = ctx.random() + rnd2 = ctx.random() + + # Both should produce same sequence + assert isinstance(rnd1, random.Random) + assert isinstance(rnd2, random.Random) + assert rnd1.random() == rnd2.random() + + def test_random_has_deterministic_marker(self): + """Test that random() sets _dt_deterministic marker.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + rnd = ctx.random() + assert hasattr(rnd, "_dt_deterministic") + assert rnd._dt_deterministic 
is True + + def test_uuid4_generates_deterministic_uuid(self): + """Test that uuid4() generates deterministic UUIDs v5 with counter.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx1 = mock_deterministic_context("test-instance", dt) + ctx2 = mock_deterministic_context("test-instance", dt) + + uuid1 = ctx1.uuid4() + uuid2 = ctx2.uuid4() + + assert isinstance(uuid1, uuid.UUID) + assert uuid1.version == 5 # Now using UUID v5 like .NET + assert uuid1 == uuid2 # Same counter (0) produces same UUID + + def test_uuid4_increments_counter(self): + """Test that uuid4() increments counter producing different UUIDs.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + uuid1 = ctx.uuid4() # counter=0 + uuid2 = ctx.uuid4() # counter=1 + uuid3 = ctx.uuid4() # counter=2 + + # All should be different due to counter + assert uuid1 != uuid2 + assert uuid2 != uuid3 + assert uuid1 != uuid3 + + def test_uuid4_counter_resets_on_replay(self): + """Test that counter resets on new context (simulating replay).""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + + # First execution + ctx1 = mock_deterministic_context("test-instance", dt) + uuid1_first = ctx1.uuid4() # counter=0 + uuid1_second = ctx1.uuid4() # counter=1 + + # Replay - new context, counter resets + ctx2 = mock_deterministic_context("test-instance", dt) + uuid2_first = ctx2.uuid4() # counter=0 + uuid2_second = ctx2.uuid4() # counter=1 + + # Same counter values produce same UUIDs (determinism!) + assert uuid1_first == uuid2_first + assert uuid1_second == uuid2_second + + def test_new_guid_is_alias_for_uuid4(self): + """Test that new_guid() is an alias for uuid4().""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + guid1 = ctx.new_guid() # counter=0 + guid2 = ctx.uuid4() # counter=1 + + # Both should be v5 UUIDs, but different due to counter increment + assert isinstance(guid1, uuid.UUID) + assert isinstance(guid2, uuid.UUID) + assert guid1.version == 5 + assert guid2.version == 5 + assert guid1 != guid2 # Different due to counter + + # Verify determinism - same counter produces same UUID + ctx2 = mock_deterministic_context("test-instance", dt) + guid3 = ctx2.new_guid() # counter=0 + assert guid3 == guid1 # Same as first call + + def test_random_string_generates_string_of_correct_length(self): + """Test that random_string() generates string of specified length.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + s = ctx.random_string(10) + assert len(s) == 10 + + def test_random_string_is_deterministic(self): + """Test that random_string() produces consistent results.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx1 = mock_deterministic_context("test-instance", dt) + ctx2 = mock_deterministic_context("test-instance", dt) + + s1 = ctx1.random_string(20) + s2 = ctx2.random_string(20) + assert s1 == s2 + + def test_random_string_uses_default_alphabet(self): + """Test that random_string() uses alphanumeric characters by default.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + s = ctx.random_string(100) + assert all(c.isalnum() for c in s) + + def test_random_string_uses_custom_alphabet(self): + """Test that random_string() respects custom alphabet.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = 
mock_deterministic_context("test-instance", dt) + + s = ctx.random_string(50, alphabet="ABC") + assert all(c in "ABC" for c in s) + + def test_random_string_raises_on_negative_length(self): + """Test that random_string() raises ValueError for negative length.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + with pytest.raises(ValueError, match="length must be non-negative"): + ctx.random_string(-1) + + def test_random_string_raises_on_empty_alphabet(self): + """Test that random_string() raises ValueError for empty alphabet.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + with pytest.raises(ValueError, match="alphabet must not be empty"): + ctx.random_string(10, alphabet="") + + def test_random_string_handles_zero_length(self): + """Test that random_string() handles zero length correctly.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + s = ctx.random_string(0) + assert s == "" + + def test_random_int_generates_int_in_range(self): + """Test that random_int() generates integer in specified range.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + for _ in range(10): + val = ctx.random_int(10, 20) + assert 10 <= val <= 20 + + def test_random_int_is_deterministic(self): + """Test that random_int() produces consistent results.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx1 = mock_deterministic_context("test-instance", dt) + ctx2 = mock_deterministic_context("test-instance", dt) + + val1 = ctx1.random_int(0, 1000) + val2 = ctx2.random_int(0, 1000) + assert val1 == val2 + + def test_random_int_uses_default_range(self): + """Test that random_int() uses default range when not specified.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + val = ctx.random_int() + assert 0 <= val <= 2**31 - 1 + + def test_random_int_raises_on_invalid_range(self): + """Test that random_int() raises ValueError when min > max.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + with pytest.raises(ValueError, match="min_value must be <= max_value"): + ctx.random_int(20, 10) + + def test_random_int_handles_same_min_and_max(self): + """Test that random_int() handles case where min equals max.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + val = ctx.random_int(42, 42) + assert val == 42 + + def test_random_choice_picks_from_sequence(self): + """Test that random_choice() picks element from sequence.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + choices = ["a", "b", "c", "d", "e"] + result = ctx.random_choice(choices) + assert result in choices + + def test_random_choice_is_deterministic(self): + """Test that random_choice() produces consistent results.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx1 = mock_deterministic_context("test-instance", dt) + ctx2 = mock_deterministic_context("test-instance", dt) + + choices = list(range(100)) + result1 = ctx1.random_choice(choices) + result2 = ctx2.random_choice(choices) + assert result1 == result2 + + def test_random_choice_raises_on_empty_sequence(self): + """Test 
that random_choice() raises IndexError for empty sequence.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + with pytest.raises(IndexError, match="Cannot choose from empty sequence"): + ctx.random_choice([]) + + def test_random_choice_works_with_different_sequence_types(self): + """Test that random_choice() works with various sequence types.""" + dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc) + ctx = mock_deterministic_context("test-instance", dt) + + # List + result = ctx.random_choice([1, 2, 3]) + assert result in [1, 2, 3] + + # Reset context for deterministic behavior + ctx = mock_deterministic_context("test-instance", dt) + # Tuple + result = ctx.random_choice((1, 2, 3)) + assert result in (1, 2, 3) + + # Reset context for deterministic behavior + ctx = mock_deterministic_context("test-instance", dt) + # String + result = ctx.random_choice("abc") + assert result in "abc" diff --git a/tests/durabletask/test_orchestration_e2e.py b/tests/durabletask/test_orchestration_e2e.py new file mode 100644 index 00000000..181d71d4 --- /dev/null +++ b/tests/durabletask/test_orchestration_e2e.py @@ -0,0 +1,775 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import json +import threading +import time +from datetime import timedelta +from typing import Optional + +import pytest + +from durabletask import client, task, worker + +# NOTE: These tests assume a sidecar process is running. Example command: +# dapr init || true +# dapr run --app-id test-app --dapr-grpc-port 4001 +pytestmark = pytest.mark.e2e + + +def _wait_until_terminal( + hub_client: client.TaskHubGrpcClient, + instance_id: str, + *, + timeout_s: int = 30, + fetch_payloads: bool = True, +) -> Optional[client.OrchestrationState]: + """Polling-based completion wait that does not rely on the completion stream. + + Returns the terminal state or None if timeout. + """ + deadline = time.time() + timeout_s + delay = 0.1 + while time.time() < deadline: + st = hub_client.get_orchestration_state(instance_id, fetch_payloads=fetch_payloads) + if st and st.runtime_status in ( + client.OrchestrationStatus.COMPLETED, + client.OrchestrationStatus.FAILED, + client.OrchestrationStatus.TERMINATED, + client.OrchestrationStatus.CANCELED, + ): + return st + time.sleep(delay) + delay = min(delay * 1.5, 1.0) + return None + + +def test_empty_orchestration(): + invoked = False + + def empty_orchestrator(ctx: task.OrchestrationContext, _): + nonlocal invoked # don't do this in a real app! 
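+        # set the flag so the test can assert the orchestrator actually executed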
+ invoked = True + + channel_options = [ + ("grpc.max_send_message_length", 1024 * 1024), # 1MB + ] + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(channel_options=channel_options) as w: + w.add_orchestrator(empty_orchestrator) + w.start() + + # set a custom max send length option + c = client.TaskHubGrpcClient(channel_options=channel_options) + id = c.schedule_new_orchestration(empty_orchestrator) + state = c.wait_for_orchestration_completion(id, timeout=30) + + # Test calling wait again on already-completed orchestration (should return immediately) + state2 = c.wait_for_orchestration_completion(id, timeout=30) + assert state2 is not None + assert state2.runtime_status == client.OrchestrationStatus.COMPLETED + + assert invoked + assert state is not None + assert state.name == task.get_name(empty_orchestrator) + assert state.instance_id == id + assert state.failure_details is None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_input is None + assert state.serialized_output is None + assert state.serialized_custom_status is None + + +def test_activity_sequence(): + def plus_one(_: task.ActivityContext, input: int) -> int: + return input + 1 + + def sequence(ctx: task.OrchestrationContext, start_val: int): + numbers = [start_val] + current = start_val + for _ in range(10): + current = yield ctx.call_activity(plus_one, input=current) + numbers.append(current) + return numbers + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(sequence) + w.add_activity(plus_one) + w.start() + + with client.TaskHubGrpcClient() as task_hub_client: + id = task_hub_client.schedule_new_orchestration(sequence, input=1) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.name == task.get_name(sequence) + assert state.instance_id == id + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert state.serialized_input == json.dumps(1) + assert state.serialized_output == json.dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + assert state.serialized_custom_status is None + + +def test_activity_error_handling(): + def throw(_: task.ActivityContext, input: int) -> int: + raise RuntimeError("Kah-BOOOOM!!!") + + compensation_counter = 0 + + def increment_counter(ctx, _): + nonlocal compensation_counter + compensation_counter += 1 + + def orchestrator(ctx: task.OrchestrationContext, input: int): + error_msg = "" + try: + yield ctx.call_activity(throw, input=input) + except task.TaskFailedError as e: + error_msg = e.details.message + + # compensating actions + yield ctx.call_activity(increment_counter) + yield ctx.call_activity(increment_counter) + + return error_msg + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.add_activity(throw) + w.add_activity(increment_counter) + w.start() + + with client.TaskHubGrpcClient() as task_hub_client: + id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.name == task.get_name(orchestrator) + assert state.instance_id == id + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert 
state.serialized_output == json.dumps("Kah-BOOOOM!!!") + assert state.failure_details is None + assert state.serialized_custom_status is None + assert compensation_counter == 2 + + +def test_sub_orchestration_fan_out(): + threadLock = threading.Lock() + activity_counter = 0 + + def increment(ctx, _): + with threadLock: + nonlocal activity_counter + activity_counter += 1 + + def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int): + for _ in range(activity_count): + yield ctx.call_activity(increment) + + def parent_orchestrator(ctx: task.OrchestrationContext, count: int): + # Fan out to multiple sub-orchestrations + tasks = [] + for _ in range(count): + tasks.append(ctx.call_sub_orchestrator(orchestrator_child, input=3)) + # Wait for all sub-orchestrations to complete + yield task.when_all(tasks) + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_activity(increment) + w.add_orchestrator(orchestrator_child) + w.add_orchestrator(parent_orchestrator) + w.start() + + with client.TaskHubGrpcClient() as task_hub_client: + id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert activity_counter == 30 + + +def test_wait_for_multiple_external_events(): + def orchestrator(ctx: task.OrchestrationContext, _): + a = yield ctx.wait_for_external_event("A") + b = yield ctx.wait_for_external_event("B") + c = yield ctx.wait_for_external_event("C") + return [a, b, c] + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.start() + + # Start the orchestration and immediately raise events to it. + task_hub_client = client.TaskHubGrpcClient() + id = task_hub_client.schedule_new_orchestration(orchestrator) + task_hub_client.raise_orchestration_event(id, "A", data="a") + task_hub_client.raise_orchestration_event(id, "B", data="b") + task_hub_client.raise_orchestration_event(id, "C", data="c") + state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) + + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(["a", "b", "c"]) + + +@pytest.mark.parametrize("raise_event", [True, False]) +def test_wait_for_external_event_timeout(raise_event: bool): + def orchestrator(ctx: task.OrchestrationContext, _): + approval: task.Task[bool] = ctx.wait_for_external_event("Approval") + timeout = ctx.create_timer(timedelta(seconds=3)) + winner = yield task.when_any([approval, timeout]) + if winner == approval: + return "approved" + else: + return "timed out" + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.start() + + # Start the orchestration and immediately raise events to it. 
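+    # (in this parametrized test, the "Approval" event is raised only when raise_event is True)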
+    with client.TaskHubGrpcClient() as task_hub_client:
+        id = task_hub_client.schedule_new_orchestration(orchestrator)
+        if raise_event:
+            task_hub_client.raise_orchestration_event(id, "Approval")
+        state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
+
+        assert state is not None
+        assert state.runtime_status == client.OrchestrationStatus.COMPLETED
+        if raise_event:
+            assert state.serialized_output == json.dumps("approved")
+        else:
+            assert state.serialized_output == json.dumps("timed out")
+
+
+def test_suspend_and_resume():
+    def orchestrator(ctx: task.OrchestrationContext, _):
+        result = yield ctx.wait_for_external_event("my_event")
+        return result
+
+    # Start a worker, which will connect to the sidecar in a background thread
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(orchestrator)
+        w.start()
+
+        with client.TaskHubGrpcClient() as task_hub_client:
+            id = task_hub_client.schedule_new_orchestration(orchestrator)
+            state = task_hub_client.wait_for_orchestration_start(id, timeout=30)
+            assert state is not None
+
+            # Suspend the orchestration and wait for it to go into the SUSPENDED state
+            task_hub_client.suspend_orchestration(id)
+            while state.runtime_status == client.OrchestrationStatus.RUNNING:
+                time.sleep(0.1)
+                state = task_hub_client.get_orchestration_state(id)
+                assert state is not None
+            assert state.runtime_status == client.OrchestrationStatus.SUSPENDED
+
+            # Raise an event to the orchestration and confirm that it does NOT complete
+            task_hub_client.raise_orchestration_event(id, "my_event", data=42)
+            try:
+                state = task_hub_client.wait_for_orchestration_completion(id, timeout=3)
+                assert False, "Orchestration should not have completed"
+            except TimeoutError:
+                pass
+
+            # Resume the orchestration and wait for it to complete
+            task_hub_client.resume_orchestration(id)
+            state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
+            assert state is not None
+            assert state.runtime_status == client.OrchestrationStatus.COMPLETED
+            assert state.serialized_output == json.dumps(42)
+
+
+def test_terminate():
+    def orchestrator(ctx: task.OrchestrationContext, _):
+        result = yield ctx.wait_for_external_event("my_event")
+        return result
+
+    # Start a worker, which will connect to the sidecar in a background thread
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(orchestrator)
+        w.start()
+
+        with client.TaskHubGrpcClient() as task_hub_client:
+            id = task_hub_client.schedule_new_orchestration(orchestrator)
+            state = task_hub_client.wait_for_orchestration_start(id, timeout=30)
+            assert state is not None
+            assert state.runtime_status == client.OrchestrationStatus.RUNNING
+
+            task_hub_client.terminate_orchestration(id, output="some reason for termination")
+            state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
+            assert state is not None
+            assert state.runtime_status == client.OrchestrationStatus.TERMINATED
+            assert state.serialized_output == json.dumps("some reason for termination")
+
+
+def test_terminate_recursive():
+    thread_lock = threading.Lock()
+    activity_counter = 0
+    delay_time = (
+        2  # seconds (already optimized from 4s - don't reduce further as it can lead to failures)
+    )
+
+    def increment(ctx, _):
+        with thread_lock:
+            nonlocal activity_counter
+            activity_counter += 1
+        raise Exception("Failed: Should not have executed the activity")
+
+    def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int):
+        due_time = ctx.current_utc_datetime + timedelta(seconds=delay_time)
+        yield ctx.create_timer(due_time)
+        yield ctx.call_activity(increment)
+
+    def parent_orchestrator(ctx: task.OrchestrationContext, count: int):
+        tasks = []
+        for _ in range(count):
+            tasks.append(ctx.call_sub_orchestrator(orchestrator_child, input=count))
+        yield task.when_all(tasks)
+
+    for recurse in [True, False]:
+        with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+            w.add_activity(increment)
+            w.add_orchestrator(orchestrator_child)
+            w.add_orchestrator(parent_orchestrator)
+            w.start()
+
+            with client.TaskHubGrpcClient() as task_hub_client:
+                instance_id = task_hub_client.schedule_new_orchestration(
+                    parent_orchestrator, input=5
+                )
+
+                time.sleep(1)  # Brief delay to let orchestrations start
+
+                output = f"Recursive termination = {recurse}"
+                task_hub_client.terminate_orchestration(
+                    instance_id, output=output, recursive=recurse
+                )
+
+                metadata = task_hub_client.wait_for_orchestration_completion(
+                    instance_id, timeout=30
+                )
+                assert metadata is not None
+                assert metadata.runtime_status == client.OrchestrationStatus.TERMINATED
+                assert metadata.serialized_output == f'"{output}"'
+                time.sleep(delay_time)  # Wait for timer to check activity execution
+                if recurse:
+                    assert activity_counter == 0, (
+                        "Activity should not have executed with recursive termination"
+                    )
+                else:
+                    assert activity_counter == 5, (
+                        "Activity should have executed without recursive termination"
+                    )
+
+
+def test_continue_as_new():
+    all_results = []
+
+    def orchestrator(ctx: task.OrchestrationContext, input: int):
+        result = yield ctx.wait_for_external_event("my_event")
+        if not ctx.is_replaying:
+            # NOTE: Real orchestrations should never interact with nonlocal variables like this.
+            nonlocal all_results  # noqa: F824
+            all_results.append(result)
+
+        if len(all_results) <= 4:
+            ctx.continue_as_new(max(all_results), save_events=True)
+        else:
+            return all_results
+
+    # Start a worker, which will connect to the sidecar in a background thread
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(orchestrator)
+        w.start()
+
+        task_hub_client = client.TaskHubGrpcClient()
+        id = task_hub_client.schedule_new_orchestration(orchestrator, input=0)
+        task_hub_client.raise_orchestration_event(id, "my_event", data=1)
+        task_hub_client.raise_orchestration_event(id, "my_event", data=2)
+        task_hub_client.raise_orchestration_event(id, "my_event", data=3)
+        task_hub_client.raise_orchestration_event(id, "my_event", data=4)
+        task_hub_client.raise_orchestration_event(id, "my_event", data=5)
+
+        state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
+        assert state is not None
+        assert state.runtime_status == client.OrchestrationStatus.COMPLETED
+        assert state.serialized_output == json.dumps(all_results)
+        assert state.serialized_input == json.dumps(4)
+        assert all_results == [1, 2, 3, 4, 5]
+
+
+def test_continue_as_new_with_activity_e2e():
+    """E2E test for continue_as_new with activities (generator-based)."""
+    activity_results = []
+
+    def double_activity(ctx: task.ActivityContext, value: int) -> int:
+        """Activity that doubles the value."""
+        result = value * 2
+        activity_results.append(result)
+        return result
+
+    def orchestrator(ctx: task.OrchestrationContext, counter: int):
+        # Call activity to process the counter
+        processed = yield ctx.call_activity(double_activity, input=counter)
+
+        # Continue as new up to 3 times
+        if counter < 3:
+            ctx.continue_as_new(counter + 1, save_events=False)
+        else:
+            return {"counter": counter, "processed": processed, "all_results": activity_results}
+
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_activity(double_activity)
+        w.add_orchestrator(orchestrator)
+        w.start()
+
+        task_hub_client = client.TaskHubGrpcClient()
+        id = task_hub_client.schedule_new_orchestration(orchestrator, input=1)
+
+        state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
+        assert state is not None
+        assert state.runtime_status == client.OrchestrationStatus.COMPLETED
+
+        output = json.loads(state.serialized_output)
+        # Should have called activity 3 times with input values 1, 2, 3
+        assert activity_results == [2, 4, 6]
+        assert output["counter"] == 3
+        assert output["processed"] == 6
+
+
+# NOTE: This test fails when running against durabletask-go with sqlite because the sqlite backend does not yet
+# support orchestration ID reuse. This gap is being tracked here:
+# https://github.com/microsoft/durabletask-go/issues/42
+def test_retry_policies():
+    # This test verifies that the retry policies are working as expected.
+    # It does this by creating an orchestration that calls a sub-orchestrator,
+    # which in turn calls an activity that always fails.
+    # In this test, the retry policies are added, and the orchestration
+    # should still fail. However, the number of times the sub-orchestrator and
+    # the activity are called should increase according to the retry policies.
+
+    child_orch_counter = 0
+    throw_activity_counter = 0
+
+    # Retry policy shared by the sub-orchestrator and activity calls
+    # (minimal delays to keep the test fast)
+    retry_policy = task.RetryPolicy(
+        first_retry_interval=timedelta(seconds=0.05),  # 0.1 → 0.05 (50% faster)
+        max_number_of_attempts=3,
+        backoff_coefficient=1,
+        max_retry_interval=timedelta(seconds=0.5),  # 1 → 0.5
+        retry_timeout=timedelta(seconds=2),  # 3 → 2
+    )
+
+    def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _):
+        yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy)
+
+    def child_orchestrator_with_retry(ctx: task.OrchestrationContext, _):
+        nonlocal child_orch_counter
+        if not ctx.is_replaying:
+            # NOTE: Real orchestrations should never interact with nonlocal variables like this.
+            # This is done only for testing purposes.
+            child_orch_counter += 1
+        yield ctx.call_activity(throw_activity_with_retry, retry_policy=retry_policy)
+
+    def throw_activity_with_retry(ctx: task.ActivityContext, _):
+        nonlocal throw_activity_counter
+        throw_activity_counter += 1
+        raise RuntimeError("Kah-BOOOOM!!!")
+
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(parent_orchestrator_with_retry)
+        w.add_orchestrator(child_orchestrator_with_retry)
+        w.add_activity(throw_activity_with_retry)
+        w.start()
+
+        task_hub_client = client.TaskHubGrpcClient()
+        id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry)
+        state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
+        assert state is not None
+        assert state.runtime_status == client.OrchestrationStatus.FAILED
+        assert state.failure_details is not None
+        assert state.failure_details.error_type == "TaskFailedError"
+        assert state.failure_details.message.startswith("Sub-orchestration task #1 failed:")
+        assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!")
+        assert state.failure_details.stack_trace is not None
+        assert throw_activity_counter == 9
+        assert child_orch_counter == 3
+
+    # Test 2: Verify NonRetryableError prevents retries even with retry policy
+    non_retryable_counter = 0
+
+    def throw_non_retryable(ctx: task.ActivityContext, _):
+        nonlocal non_retryable_counter
+        non_retryable_counter += 1
+        raise task.NonRetryableError("Cannot retry this!")
+
+    def orchestrator_with_non_retryable(ctx: task.OrchestrationContext, _):
+        # Even with retry policy, NonRetryableError should fail immediately
+        yield ctx.call_activity(throw_non_retryable, retry_policy=retry_policy)
+
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(orchestrator_with_non_retryable)
+        w.add_activity(throw_non_retryable)
+        w.start()
+
+        task_hub_client = client.TaskHubGrpcClient()
+        id = task_hub_client.schedule_new_orchestration(orchestrator_with_non_retryable)
+        state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
+        assert state is not None
+        assert state.runtime_status == client.OrchestrationStatus.FAILED
+        assert state.failure_details is not None
+        assert "Cannot retry this!" in state.failure_details.message
+        # Key assertion: activity was called exactly once (no retries)
+        assert non_retryable_counter == 1
+
+
+def test_retry_timeout():
+    # This test verifies that the retry timeout is working as expected.
+    # Max number of attempts is 5 and the retry timeout is 13 seconds, which
+    # elapses between the 4th and 5th attempts, so only 4 attempts are made.
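+    # For reference, the expected attempt schedule under the policy below
+    # (first_retry_interval=1s, backoff_coefficient=2, max_retry_interval=10s):
+    #
+    #   attempt 1: t = 0s    (initial call)
+    #   attempt 2: t = 1s    (0 + 1)
+    #   attempt 3: t = 3s    (1 + 2)
+    #   attempt 4: t = 7s    (1 + 2 + 4)
+    #   attempt 5: t = 15s   (1 + 2 + 4 + 8) -> exceeds retry_timeout=13s, skipped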
+    throw_activity_counter = 0
+    retry_policy = task.RetryPolicy(
+        first_retry_interval=timedelta(seconds=1),
+        max_number_of_attempts=5,
+        backoff_coefficient=2,
+        max_retry_interval=timedelta(seconds=10),
+        retry_timeout=timedelta(seconds=13),  # Expires after the 4th attempt (7s), before the 5th (15s)
+    )
+
+    def mock_orchestrator(ctx: task.OrchestrationContext, _):
+        yield ctx.call_activity(throw_activity, retry_policy=retry_policy)
+
+    def throw_activity(ctx: task.ActivityContext, _):
+        nonlocal throw_activity_counter
+        throw_activity_counter += 1
+        raise RuntimeError("Kah-BOOOOM!!!")
+
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(mock_orchestrator)
+        w.add_activity(throw_activity)
+        w.start()
+
+        task_hub_client = client.TaskHubGrpcClient()
+        id = task_hub_client.schedule_new_orchestration(mock_orchestrator)
+        state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
+        assert state is not None
+        assert state.runtime_status == client.OrchestrationStatus.FAILED
+        assert state.failure_details is not None
+        assert state.failure_details.error_type == "TaskFailedError"
+        assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!")
+        assert state.failure_details.stack_trace is not None
+        assert throw_activity_counter == 4
+
+
+def test_custom_status():
+    def empty_orchestrator(ctx: task.OrchestrationContext, _):
+        ctx.set_custom_status("foobaz")
+
+    # Start a worker, which will connect to the sidecar in a background thread
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(empty_orchestrator)
+        w.start()
+
+        c = client.TaskHubGrpcClient()
+        id = c.schedule_new_orchestration(empty_orchestrator)
+        state = c.wait_for_orchestration_completion(id, timeout=30)
+
+        assert state is not None
+        assert state.name == task.get_name(empty_orchestrator)
+        assert state.instance_id == id
+        assert state.failure_details is None
+        assert state.runtime_status == client.OrchestrationStatus.COMPLETED
+        assert state.serialized_input is None
+        assert state.serialized_output is None
+        assert state.serialized_custom_status == '"foobaz"'
+
+
+def test_now_with_sequence_ordering():
+    """
+    Test that now_with_sequence() maintains strict ordering across workflow execution.
+
+    This verifies:
+    1. Timestamps increment sequentially
+    2. Order is preserved across activity calls
+    3.
Deterministic behavior (timestamps are consistent on replay) + """ + + def simple_activity(ctx, input_val: str): + return f"activity_{input_val}_done" + + def timestamp_ordering_workflow(ctx: task.OrchestrationContext, _): + timestamps = [] + + # First timestamp before any activities + t1 = ctx.now_with_sequence() + timestamps.append(("t1_before_activities", t1.isoformat())) + + # Call first activity + result1 = yield ctx.call_activity(simple_activity, input="first") + timestamps.append(("activity_1_result", result1)) + + # Timestamp after first activity + t2 = ctx.now_with_sequence() + timestamps.append(("t2_after_activity_1", t2.isoformat())) + + # Call second activity + result2 = yield ctx.call_activity(simple_activity, input="second") + timestamps.append(("activity_2_result", result2)) + + # Timestamp after second activity + t3 = ctx.now_with_sequence() + timestamps.append(("t3_after_activity_2", t3.isoformat())) + + # A few more rapid timestamps to test counter incrementing + t4 = ctx.now_with_sequence() + timestamps.append(("t4_rapid", t4.isoformat())) + + t5 = ctx.now_with_sequence() + timestamps.append(("t5_rapid", t5.isoformat())) + + # Return all timestamps for verification + return { + "timestamps": timestamps, + "t1": t1.isoformat(), + "t2": t2.isoformat(), + "t3": t3.isoformat(), + "t4": t4.isoformat(), + "t5": t5.isoformat(), + } + + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(timestamp_ordering_workflow) + w.add_activity(simple_activity) + w.start() + + with client.TaskHubGrpcClient() as c: + instance_id = c.schedule_new_orchestration(timestamp_ordering_workflow) + state = c.wait_for_orchestration_completion( + instance_id, timeout=30, fetch_payloads=True + ) + + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + assert state.failure_details is None + + # Parse result + result = json.loads(state.serialized_output) + assert result is not None + + # Verify all timestamps are present + assert "t1" in result + assert "t2" in result + assert "t3" in result + assert "t4" in result + assert "t5" in result + + # Parse timestamps back to datetime objects for comparison + from datetime import datetime + + t1 = datetime.fromisoformat(result["t1"]) + t2 = datetime.fromisoformat(result["t2"]) + t3 = datetime.fromisoformat(result["t3"]) + t4 = datetime.fromisoformat(result["t4"]) + t5 = datetime.fromisoformat(result["t5"]) + + # Verify strict ordering: t1 < t2 < t3 < t4 < t5 + # This is the key guarantee - timestamps must maintain order for tracing + assert t1 < t2, f"t1 ({t1}) should be < t2 ({t2})" + assert t2 < t3, f"t2 ({t2}) should be < t3 ({t3})" + assert t3 < t4, f"t3 ({t3}) should be < t4 ({t4})" + assert t4 < t5, f"t4 ({t4}) should be < t5 ({t5})" + + # Verify that timestamps called in rapid succession (t3, t4, t5 with no activities between) + # have exactly 1 microsecond deltas. These happen within the same replay execution. + delta_t3_t4 = (t4 - t3).total_seconds() * 1_000_000 + delta_t4_t5 = (t5 - t4).total_seconds() * 1_000_000 + + assert delta_t3_t4 == 1.0, f"t3 to t4 should be 1 microsecond, got {delta_t3_t4}" + assert delta_t4_t5 == 1.0, f"t4 to t5 should be 1 microsecond, got {delta_t4_t5}" + + # Note: We don't check exact deltas for t1->t2 or t2->t3 because they span + # activity calls. During replay, current_utc_datetime changes based on event + # timestamps, so the base time shifts. However, ordering is still guaranteed. 
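+
+
+# Editorial sketch (not part of the durabletask API): the ordering guarantee
+# asserted above can be modeled as a frozen base time plus a per-execution
+# sequence counter. `_now_with_sequence_model` is a hypothetical helper shown
+# only to illustrate why back-to-back calls differ by exactly one microsecond,
+# while calls separated by activities only guarantee relative ordering.
+def _now_with_sequence_model(base, counter: int):
+    # Deterministic: the same (base, counter) pair always produces the same
+    # timestamp; consecutive counters are exactly one microsecond apart.
+    return base + timedelta(microseconds=counter)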
+ + +def test_cannot_add_orchestrator_while_running(): + """Test that orchestrators cannot be added while the worker is running.""" + + def orchestrator(ctx: task.OrchestrationContext, _): + return "done" + + def another_orchestrator(ctx: task.OrchestrationContext, _): + return "another" + + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.start() + + # Try to add another orchestrator while running + with pytest.raises( + RuntimeError, match="Orchestrators cannot be added while the worker is running" + ): + w.add_orchestrator(another_orchestrator) + + +def test_cannot_add_activity_while_running(): + """Test that activities cannot be added while the worker is running.""" + + def activity(ctx: task.ActivityContext, input): + return input + + def another_activity(ctx: task.ActivityContext, input): + return input * 2 + + def orchestrator(ctx: task.OrchestrationContext, _): + return "done" + + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.add_activity(activity) + w.start() + + # Try to add another activity while running + with pytest.raises( + RuntimeError, match="Activities cannot be added while the worker is running" + ): + w.add_activity(another_activity) + + +def test_can_add_functions_after_stop(): + """Test that orchestrators/activities can be added after stopping the worker.""" + + def orchestrator1(ctx: task.OrchestrationContext, _): + return "done" + + def orchestrator2(ctx: task.OrchestrationContext, _): + return "done2" + + def activity(ctx: task.ActivityContext, input): + return input + + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator1) + w.start() + + c = client.TaskHubGrpcClient() + id = c.schedule_new_orchestration(orchestrator1) + state = c.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == client.OrchestrationStatus.COMPLETED + + # Should be able to add after stop + w.add_orchestrator(orchestrator2) + w.add_activity(activity) diff --git a/tests/durabletask/test_orchestration_e2e_async.py b/tests/durabletask/test_orchestration_e2e_async.py new file mode 100644 index 00000000..b71e70b1 --- /dev/null +++ b/tests/durabletask/test_orchestration_e2e_async.py @@ -0,0 +1,483 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. + +import asyncio +import json +import threading +from datetime import timedelta + +import pytest + +from durabletask import task, worker +from durabletask.aio.client import AsyncTaskHubGrpcClient +from durabletask.client import OrchestrationStatus + +# NOTE: These tests assume a sidecar process is running. Example command: +# go install github.com/dapr/durabletask-go@main +# durabletask-go --port 4001 +pytestmark = [pytest.mark.e2e, pytest.mark.asyncio] + + +async def test_empty_orchestration(): + invoked = False + + def empty_orchestrator(ctx: task.OrchestrationContext, _): + nonlocal invoked # don't do this in a real app! 
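+        # Orchestrator code may be replayed, so mutating outer state like this
+        # is reliable only in tests (see the NOTEs elsewhere in these files)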
+ invoked = True + + channel_options = [ + ("grpc.max_send_message_length", 1024 * 1024), # 1MB + ] + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(channel_options=channel_options) as w: + w.add_orchestrator(empty_orchestrator) + w.start() + + c = AsyncTaskHubGrpcClient(channel_options=channel_options) + id = await c.schedule_new_orchestration(empty_orchestrator) + state = await c.wait_for_orchestration_completion(id, timeout=30) + await c.aclose() + + assert invoked + assert state is not None + assert state.name == task.get_name(empty_orchestrator) + assert state.instance_id == id + assert state.failure_details is None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_input is None + assert state.serialized_output is None + assert state.serialized_custom_status is None + + +async def test_activity_sequence(): + def plus_one(_: task.ActivityContext, input: int) -> int: + return input + 1 + + def sequence(ctx: task.OrchestrationContext, start_val: int): + numbers = [start_val] + current = start_val + for _ in range(10): + current = yield ctx.call_activity(plus_one, input=current) + numbers.append(current) + return numbers + + channel_options = [ + ("grpc.max_send_message_length", 1024 * 1024), # 1MB + ] + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(channel_options=channel_options) as w: + w.add_orchestrator(sequence) + w.add_activity(plus_one) + w.start() + + client = AsyncTaskHubGrpcClient(channel_options=channel_options) + id = await client.schedule_new_orchestration(sequence, input=1) + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.name == task.get_name(sequence) + assert state.instance_id == id + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert state.serialized_input == json.dumps(1) + assert state.serialized_output == json.dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + assert state.serialized_custom_status is None + + +async def test_activity_error_handling(): + def throw(_: task.ActivityContext, input: int) -> int: + raise RuntimeError("Kah-BOOOOM!!!") + + compensation_counter = 0 + + def increment_counter(ctx, _): + nonlocal compensation_counter + compensation_counter += 1 + + def orchestrator(ctx: task.OrchestrationContext, input: int): + error_msg = "" + try: + yield ctx.call_activity(throw, input=input) + except task.TaskFailedError as e: + error_msg = e.details.message + + # compensating actions + yield ctx.call_activity(increment_counter) + yield ctx.call_activity(increment_counter) + + return error_msg + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.add_activity(throw) + w.add_activity(increment_counter) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(orchestrator, input=1) + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.name == task.get_name(orchestrator) + assert state.instance_id == id + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps("Kah-BOOOOM!!!") + assert state.failure_details is None + assert state.serialized_custom_status is None + 
assert compensation_counter == 2 + + +async def test_sub_orchestration_fan_out(): + threadLock = threading.Lock() + activity_counter = 0 + + def increment(ctx, _): + with threadLock: + nonlocal activity_counter + activity_counter += 1 + + def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int): + for _ in range(activity_count): + yield ctx.call_activity(increment) + + def parent_orchestrator(ctx: task.OrchestrationContext, count: int): + # Fan out to multiple sub-orchestrations + tasks = [] + for _ in range(count): + tasks.append(ctx.call_sub_orchestrator(orchestrator_child, input=3)) + # Wait for all sub-orchestrations to complete + yield task.when_all(tasks) + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_activity(increment) + w.add_orchestrator(orchestrator_child) + w.add_orchestrator(parent_orchestrator) + w.start() + + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(parent_orchestrator, input=10) + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.failure_details is None + assert activity_counter == 30 + + +async def test_wait_for_multiple_external_events(): + def orchestrator(ctx: task.OrchestrationContext, _): + a = yield ctx.wait_for_external_event("A") + b = yield ctx.wait_for_external_event("B") + c = yield ctx.wait_for_external_event("C") + return [a, b, c] + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.start() + + # Start the orchestration and immediately raise events to it. + client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(orchestrator) + await client.raise_orchestration_event(id, "A", data="a") + await client.raise_orchestration_event(id, "B", data="b") + await client.raise_orchestration_event(id, "C", data="c") + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(["a", "b", "c"]) + + +@pytest.mark.parametrize("raise_event", [True, False]) +async def test_wait_for_external_event_timeout(raise_event: bool): + def orchestrator(ctx: task.OrchestrationContext, _): + approval: task.Task[bool] = ctx.wait_for_external_event("Approval") + timeout = ctx.create_timer(timedelta(seconds=3)) + winner = yield task.when_any([approval, timeout]) + if winner == approval: + return "approved" + else: + return "timed out" + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.start() + + # Start the orchestration and immediately raise events to it. 
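+        # (The "Approval" event is raised only when raise_event is True; otherwise
+        # the 3-second timer wins the when_any race and the orchestration times out.)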
+ client = AsyncTaskHubGrpcClient() + id = await client.schedule_new_orchestration(orchestrator) + if raise_event: + await client.raise_orchestration_event(id, "Approval") + state = await client.wait_for_orchestration_completion(id, timeout=30) + await client.aclose() + + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + if raise_event: + assert state.serialized_output == json.dumps("approved") + else: + assert state.serialized_output == json.dumps("timed out") + + +async def test_suspend_and_resume(): + def orchestrator(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.start() + # there could be a race condition if the workflow is scheduled before orchestrator is started + await asyncio.sleep(0.2) + + async with AsyncTaskHubGrpcClient() as client: + id = await client.schedule_new_orchestration(orchestrator) + state = await client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + + # Suspend the orchestration and wait for it to go into the SUSPENDED state + await client.suspend_orchestration(id) + while state.runtime_status == OrchestrationStatus.RUNNING: + await asyncio.sleep(0.1) + state = await client.get_orchestration_state(id) + assert state is not None + assert state.runtime_status == OrchestrationStatus.SUSPENDED + + # Raise an event to the orchestration and confirm that it does NOT complete + await client.raise_orchestration_event(id, "my_event", data=42) + try: + state = await client.wait_for_orchestration_completion(id, timeout=3) + assert False, "Orchestration should not have completed" + except TimeoutError: + pass + + # Resume the orchestration and wait for it to complete + await client.resume_orchestration(id) + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.COMPLETED + assert state.serialized_output == json.dumps(42) + + +async def test_terminate(): + def orchestrator(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(orchestrator) + w.start() + + async with AsyncTaskHubGrpcClient() as client: + id = await client.schedule_new_orchestration(orchestrator) + state = await client.wait_for_orchestration_start(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.RUNNING + + await client.terminate_orchestration(id, output="some reason for termination") + state = await client.wait_for_orchestration_completion(id, timeout=30) + assert state is not None + assert state.runtime_status == OrchestrationStatus.TERMINATED + assert state.serialized_output == json.dumps("some reason for termination") + + +async def test_terminate_recursive(): + def root(ctx: task.OrchestrationContext, _): + result = yield ctx.call_sub_orchestrator(child) + return result + + def child(ctx: task.OrchestrationContext, _): + result = yield ctx.wait_for_external_event("my_event") + return result + + # Start a worker, which will connect to the sidecar in a background thread + with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w: + w.add_orchestrator(root) + 
w.add_orchestrator(child)
+        w.start()
+
+        async with AsyncTaskHubGrpcClient() as client:
+            id = await client.schedule_new_orchestration(root)
+            state = await client.wait_for_orchestration_start(id, timeout=30)
+            assert state is not None
+            assert state.runtime_status == OrchestrationStatus.RUNNING
+
+            # Terminate the root orchestration (recursive is True by default)
+            await client.terminate_orchestration(id, output="some reason for termination")
+            state = await client.wait_for_orchestration_completion(id, timeout=30)
+            assert state is not None
+            assert state.runtime_status == OrchestrationStatus.TERMINATED
+
+            # With recursive termination (the default), the child orchestration is
+            # terminated as well
+            await client.wait_for_orchestration_completion(id, timeout=30)
+            assert state is not None
+            assert state.runtime_status == OrchestrationStatus.TERMINATED
+
+            await client.purge_orchestration(id)
+            state = await client.get_orchestration_state(id)
+            assert state is None
+
+
+async def test_continue_as_new():
+    all_results = []
+
+    def orchestrator(ctx: task.OrchestrationContext, input: int):
+        result = yield ctx.wait_for_external_event("my_event")
+        if not ctx.is_replaying:
+            # NOTE: Real orchestrations should never interact with nonlocal variables like this.
+            nonlocal all_results  # noqa: F824
+            all_results.append(result)
+
+        if len(all_results) <= 4:
+            ctx.continue_as_new(max(all_results), save_events=True)
+        else:
+            return all_results
+
+    # Start a worker, which will connect to the sidecar in a background thread
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(orchestrator)
+        w.start()
+
+        async with AsyncTaskHubGrpcClient() as client:
+            id = await client.schedule_new_orchestration(orchestrator, input=0)
+            await client.raise_orchestration_event(id, "my_event", data=1)
+            await client.raise_orchestration_event(id, "my_event", data=2)
+            await client.raise_orchestration_event(id, "my_event", data=3)
+            await client.raise_orchestration_event(id, "my_event", data=4)
+            await client.raise_orchestration_event(id, "my_event", data=5)
+
+            state = await client.wait_for_orchestration_completion(id, timeout=30)
+            assert state is not None
+            assert state.runtime_status == OrchestrationStatus.COMPLETED
+            assert state.serialized_output == json.dumps(all_results)
+            assert state.serialized_input == json.dumps(4)
+            assert all_results == [1, 2, 3, 4, 5]
+
+
+async def test_retry_policies():
+    # This test verifies that retry policies work as expected. It creates an
+    # orchestration that calls a sub-orchestrator, which in turn calls an
+    # activity that always fails. With retry policies attached, the
+    # orchestration still fails, but the number of times the sub-orchestrator
+    # and the activity are invoked should increase according to the policies.
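+    # For reference: with max_number_of_attempts=3 on both the sub-orchestration
+    # call and the activity call, each of the 3 sub-orchestration attempts runs
+    # the failing activity up to 3 times, giving 3 x 3 = 9 activity invocations
+    # (matching the assertions at the end of this test).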
+
+    child_orch_counter = 0
+    throw_activity_counter = 0
+
+    # Retry policy with minimal delays to keep the test fast
+    retry_policy = task.RetryPolicy(
+        first_retry_interval=timedelta(seconds=0.05),  # 0.1 → 0.05 (50% faster)
+        max_number_of_attempts=3,
+        backoff_coefficient=1,
+        max_retry_interval=timedelta(seconds=0.5),  # 1 → 0.5
+        retry_timeout=timedelta(seconds=2),  # 3 → 2
+    )
+
+    def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _):
+        yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy)
+
+    def child_orchestrator_with_retry(ctx: task.OrchestrationContext, _):
+        nonlocal child_orch_counter
+        if not ctx.is_replaying:
+            # NOTE: Real orchestrations should never interact with nonlocal variables like this.
+            # This is done only for testing purposes.
+            child_orch_counter += 1
+        yield ctx.call_activity(throw_activity_with_retry, retry_policy=retry_policy)
+
+    def throw_activity_with_retry(ctx: task.ActivityContext, _):
+        nonlocal throw_activity_counter
+        throw_activity_counter += 1
+        raise RuntimeError("Kah-BOOOOM!!!")
+
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(parent_orchestrator_with_retry)
+        w.add_orchestrator(child_orchestrator_with_retry)
+        w.add_activity(throw_activity_with_retry)
+        w.start()
+
+        async with AsyncTaskHubGrpcClient() as client:
+            id = await client.schedule_new_orchestration(parent_orchestrator_with_retry)
+            state = await client.wait_for_orchestration_completion(id, timeout=30)
+            assert state is not None
+            assert state.runtime_status == OrchestrationStatus.FAILED
+            assert state.failure_details is not None
+            assert state.failure_details.error_type == "TaskFailedError"
+            assert state.failure_details.message.startswith("Sub-orchestration task #1 failed:")
+            assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!")
+            assert state.failure_details.stack_trace is not None
+            assert throw_activity_counter == 9
+            assert child_orch_counter == 3
+
+
+async def test_retry_timeout():
+    # This test verifies that the retry timeout is working as expected.
+    # Max number of attempts is 5 and the retry timeout is 13 seconds.
+    # Cumulative backoff delays put the 4th attempt at 1 + 2 + 4 = 7 seconds;
+    # the 5th attempt would not start until 15 seconds, past the 13-second
+    # timeout, so only 4 attempts should be made.
+    throw_activity_counter = 0
+    retry_policy = task.RetryPolicy(
+        first_retry_interval=timedelta(seconds=1),
+        max_number_of_attempts=5,
+        backoff_coefficient=2,
+        max_retry_interval=timedelta(seconds=10),
+        retry_timeout=timedelta(seconds=13),  # Expires after the 4th attempt (7s), before the 5th (15s)
+    )
+
+    def mock_orchestrator(ctx: task.OrchestrationContext, _):
+        yield ctx.call_activity(throw_activity, retry_policy=retry_policy)
+
+    def throw_activity(ctx: task.ActivityContext, _):
+        nonlocal throw_activity_counter
+        throw_activity_counter += 1
+        raise RuntimeError("Kah-BOOOOM!!!")
+
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(mock_orchestrator)
+        w.add_activity(throw_activity)
+        w.start()
+
+        async with AsyncTaskHubGrpcClient() as client:
+            id = await client.schedule_new_orchestration(mock_orchestrator)
+            state = await client.wait_for_orchestration_completion(id, timeout=30)
+            assert state is not None
+            assert state.runtime_status == OrchestrationStatus.FAILED
+            assert state.failure_details is not None
+            assert state.failure_details.error_type == "TaskFailedError"
+            assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!")
+            assert state.failure_details.stack_trace is not None
+            assert throw_activity_counter == 4
+
+
+async def test_custom_status():
+    def empty_orchestrator(ctx: task.OrchestrationContext, _):
+        ctx.set_custom_status("foobaz")
+
+    # Start a worker, which will connect to the sidecar in a background thread
+    with worker.TaskHubGrpcWorker(stop_timeout=2.0) as w:
+        w.add_orchestrator(empty_orchestrator)
+        w.start()
+
+        async with AsyncTaskHubGrpcClient() as client:
+            id = await client.schedule_new_orchestration(empty_orchestrator)
+            state = await client.wait_for_orchestration_completion(id, timeout=30)
+
+            assert state is not None
+            assert state.name == task.get_name(empty_orchestrator)
+            assert state.instance_id == id
+            assert state.failure_details is None
+            assert state.runtime_status == OrchestrationStatus.COMPLETED
+            assert state.serialized_input is None
+            assert state.serialized_output is None
+            assert state.serialized_custom_status == '"foobaz"'
diff --git a/tests/test_orchestration_executor.py b/tests/durabletask/test_orchestration_executor.py
similarity index 68%
rename from tests/test_orchestration_executor.py
rename to tests/durabletask/test_orchestration_executor.py
index 95eab0b2..bf81f269 100644
--- a/tests/test_orchestration_executor.py
+++ b/tests/durabletask/test_orchestration_executor.py
@@ -3,8 +3,7 @@
 
 import json
 import logging
-from datetime import datetime, timedelta
-from typing import List
+from datetime import datetime, timedelta, timezone
 
 import pytest
 
@@ -13,9 +12,10 @@
 from durabletask import task, worker
 
 logging.basicConfig(
-    format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s',
-    datefmt='%Y-%m-%d %H:%M:%S',
-    level=logging.DEBUG)
+    format="%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s",
+    datefmt="%Y-%m-%d %H:%M:%S",
+    level=logging.DEBUG,
+)
 
 TEST_LOGGER = logging.getLogger("tests")
 
 TEST_INSTANCE_ID = "abc123"
@@ -35,7 +35,9 @@ def orchestrator(ctx: task.OrchestrationContext, my_input: int):
     start_time = datetime.now()
     new_events = [
         helpers.new_orchestrator_started_event(start_time),
-        helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=json.dumps(test_input)),
+        helpers.new_execution_started_event(
+            name, TEST_INSTANCE_ID, encoded_input=json.dumps(test_input)
+        ),
     ]
     executor = worker._OrchestrationExecutor(registry, TEST_LOGGER)
     result = 
executor.execute(TEST_INSTANCE_ID, [], new_events) @@ -100,7 +102,8 @@ def delay_orchestrator(ctx: task.OrchestrationContext, _): new_events = [ helpers.new_orchestrator_started_event(start_time), - helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None)] + helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, [], new_events) actions = result.actions @@ -130,9 +133,9 @@ def delay_orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(start_time), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_timer_created_event(1, expected_fire_at)] - new_events = [ - helpers.new_timer_fired_event(1, expected_fire_at)] + helpers.new_timer_created_event(1, expected_fire_at), + ] + new_events = [helpers.new_timer_fired_event(1, expected_fire_at)] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) @@ -146,6 +149,7 @@ def delay_orchestrator(ctx: task.OrchestrationContext, _): def test_schedule_activity_actions(): """Test the actions output for the call_activity orchestrator method""" + def dummy_activity(ctx, _): pass @@ -159,7 +163,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): encoded_input = json.dumps(42) new_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input)] + helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, [], new_events) actions = result.actions @@ -172,6 +177,72 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): assert actions[0].scheduleTask.input.value == encoded_input +def test_schedule_activity_actions_router_without_app_id(): + """Tests that scheduleTask action contains correct router fields when app_id is specified""" + + def dummy_activity(ctx, _): + pass + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_activity(dummy_activity, input=42) + + registry = worker._Registry() + name = registry.add_orchestrator(orchestrator) + + # Prepare execution started event with source app set on router + exec_evt = helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None) + exec_evt.router.sourceAppID = "source-app" + + new_events = [ + helpers.new_orchestrator_started_event(), + exec_evt, + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, [], new_events) + actions = result.actions + + assert len(actions) == 1 + action = actions[0] + assert action.router.sourceAppID == "source-app" + assert action.router.targetAppID == "" + assert action.scheduleTask.router.sourceAppID == "source-app" + assert action.scheduleTask.router.targetAppID == "" + + +def test_schedule_activity_actions_router_with_app_id(): + """Tests that scheduleTask action contains correct router fields when app_id is specified""" + + def dummy_activity(ctx, _): + pass + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_activity(dummy_activity, input=42, app_id="target-app") + + registry = worker._Registry() + name = registry.add_orchestrator(orchestrator) + + # Prepare execution started event with source app set on 
router + exec_evt = helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None) + exec_evt.router.sourceAppID = "source-app" + + new_events = [ + helpers.new_orchestrator_started_event(), + exec_evt, + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, [], new_events) + actions = result.actions + + assert len(actions) == 1 + action = actions[0] + assert action.router.sourceAppID == "source-app" + assert action.router.targetAppID == "target-app" + assert action.scheduleTask.router.sourceAppID == "source-app" + assert action.scheduleTask.router.targetAppID == "target-app" + + def test_activity_task_completion(): """Tests the successful completion of an activity task""" @@ -188,7 +259,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] encoded_output = json.dumps("done!") new_events = [helpers.new_task_completed_event(1, encoded_output)] @@ -204,6 +276,7 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): def test_activity_task_failed(): """Tests the failure of an activity task""" + def dummy_activity(ctx, _): pass @@ -217,7 +290,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] ex = Exception("Kah-BOOOOM!!!") new_events = [helpers.new_task_failed_event(1, ex)] @@ -228,7 +302,9 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'TaskFailedError' # TODO: Should this be the specific error? + assert ( + complete_action.failureDetails.errorType == "TaskFailedError" + ) # TODO: Should this be the specific error? 
assert str(ex) in complete_action.failureDetails.errorMessage # Make sure the line of code where the exception was raised is included in the stack trace @@ -250,8 +326,10 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): max_number_of_attempts=6, backoff_coefficient=2, max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=50)), - input=orchestrator_input) + retry_timeout=timedelta(seconds=50), + ), + input=orchestrator_input, + ) return result registry = worker._Registry() @@ -262,12 +340,14 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] expected_fire_at = current_timestamp + timedelta(seconds=1) new_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -281,7 +361,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(2, current_timestamp)] + helpers.new_timer_fired_event(2, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -294,7 +375,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): expected_fire_at = current_timestamp + timedelta(seconds=2) new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -308,7 +390,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(3, current_timestamp)] + helpers.new_timer_fired_event(3, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -321,7 +404,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -335,7 +419,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(4, 
current_timestamp)] + helpers.new_timer_fired_event(4, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -348,7 +433,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -362,7 +448,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(5, current_timestamp)] + helpers.new_timer_fired_event(5, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -376,7 +463,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -390,7 +478,8 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(6, current_timestamp)] + helpers.new_timer_fired_event(6, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -402,17 +491,21 @@ def orchestrator(ctx: task.OrchestrationContext, orchestrator_input): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions assert len(actions) == 1 - assert actions[0].completeOrchestration.failureDetails.errorMessage.__contains__("Activity task #1 failed: Kah-BOOOOM!!!") + assert actions[0].completeOrchestration.failureDetails.errorMessage.__contains__( + "Activity task #1 failed: Kah-BOOOOM!!!" 
+ ) assert actions[0].id == 7 def test_nondeterminism_expected_timer(): """Tests the non-determinism detection logic when call_timer is expected but some other method (call_activity) is called instead""" + def dummy_activity(ctx, _): pass @@ -427,7 +520,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_timer_created_event(1, fire_at)] + helpers.new_timer_created_event(1, fire_at), + ] new_events = [helpers.new_timer_fired_event(timer_id=1, fire_at=fire_at)] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -436,7 +530,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID assert "create_timer" in complete_action.failureDetails.errorMessage # expected method name assert "call_activity" in complete_action.failureDetails.errorMessage # actual method name @@ -444,6 +538,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_nondeterminism_expected_activity_call_no_task_id(): """Tests the non-determinism detection logic when invoking activity functions""" + def orchestrator(ctx: task.OrchestrationContext, _): result = yield task.CompletableTask() # dummy task return result @@ -454,7 +549,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, "bogus_activity")] + helpers.new_task_scheduled_event(1, "bogus_activity"), + ] new_events = [helpers.new_task_completed_event(1)] @@ -464,13 +560,14 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID assert "call_activity" in complete_action.failureDetails.errorMessage # expected method name def test_nondeterminism_expected_activity_call_wrong_task_type(): """Tests the non-determinism detection when an activity exists in the history but a non-activity is in the code""" + def dummy_activity(ctx, _): pass @@ -484,7 +581,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), + ] new_events = [helpers.new_task_completed_event(1)] @@ -494,7 +592,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType 
== "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID assert "call_activity" in complete_action.failureDetails.errorMessage # expected method name assert "create_timer" in complete_action.failureDetails.errorMessage # unexpected method name @@ -502,6 +600,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_nondeterminism_wrong_activity_name(): """Tests the non-determinism detection when calling an activity with a name that differs from the name in the history""" + def dummy_activity(ctx, _): pass @@ -515,7 +614,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_task_scheduled_event(1, "original_activity")] + helpers.new_task_scheduled_event(1, "original_activity"), + ] new_events = [helpers.new_task_completed_event(1)] @@ -525,15 +625,20 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID assert "call_activity" in complete_action.failureDetails.errorMessage # expected method name - assert "original_activity" in complete_action.failureDetails.errorMessage # expected activity name - assert "dummy_activity" in complete_action.failureDetails.errorMessage # unexpected activity name + assert ( + "original_activity" in complete_action.failureDetails.errorMessage + ) # expected activity name + assert ( + "dummy_activity" in complete_action.failureDetails.errorMessage + ) # unexpected activity name def test_sub_orchestration_task_completion(): """Tests that a sub-orchestration task is completed when the sub-orchestration completes""" + def suborchestrator(ctx: task.OrchestrationContext, _): pass @@ -547,11 +652,15 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_sub_orchestration_created_event(1, suborchestrator_name, "sub-orch-123", encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + helpers.new_sub_orchestration_created_event( + 1, suborchestrator_name, "sub-orch-123", encoded_input=None + ), + ] - new_events = [ - helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] + new_events = [helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) @@ -562,8 +671,79 @@ def orchestrator(ctx: task.OrchestrationContext, _): assert complete_action.result.value == "42" +def test_create_sub_orchestration_actions_router_without_app_id(): + """Tests that createSubOrchestration action contains correct router fields when app_id is specified""" + + def suborchestrator(ctx: task.OrchestrationContext, _): + pass + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_sub_orchestrator(suborchestrator, input=None) + + registry = worker._Registry() + registry.add_orchestrator(suborchestrator) + orchestrator_name = 
registry.add_orchestrator(orchestrator) + + exec_evt = helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ) + exec_evt.router.sourceAppID = "source-app" + + new_events = [ + helpers.new_orchestrator_started_event(), + exec_evt, + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, [], new_events) + actions = result.actions + + assert len(actions) == 1 + action = actions[0] + assert action.router.sourceAppID == "source-app" + assert action.router.targetAppID == "" + assert action.createSubOrchestration.router.sourceAppID == "source-app" + assert action.createSubOrchestration.router.targetAppID == "" + + +def test_create_sub_orchestration_actions_router_with_app_id(): + """Tests that createSubOrchestration action contains correct router fields when app_id is specified""" + + def suborchestrator(ctx: task.OrchestrationContext, _): + pass + + def orchestrator(ctx: task.OrchestrationContext, _): + yield ctx.call_sub_orchestrator(suborchestrator, input=None, app_id="target-app") + + registry = worker._Registry() + registry.add_orchestrator(suborchestrator) + orchestrator_name = registry.add_orchestrator(orchestrator) + + exec_evt = helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ) + exec_evt.router.sourceAppID = "source-app" + + new_events = [ + helpers.new_orchestrator_started_event(), + exec_evt, + ] + + executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) + result = executor.execute(TEST_INSTANCE_ID, [], new_events) + actions = result.actions + + assert len(actions) == 1 + action = actions[0] + assert action.router.sourceAppID == "source-app" + assert action.router.targetAppID == "target-app" + assert action.createSubOrchestration.router.sourceAppID == "source-app" + assert action.createSubOrchestration.router.targetAppID == "target-app" + + def test_sub_orchestration_task_failed(): """Tests that a sub-orchestration task is completed when the sub-orchestration fails""" + def suborchestrator(ctx: task.OrchestrationContext, _): pass @@ -577,8 +757,13 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_sub_orchestration_created_event(1, suborchestrator_name, "sub-orch-123", encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + helpers.new_sub_orchestration_created_event( + 1, suborchestrator_name, "sub-orch-123", encoded_input=None + ), + ] ex = Exception("Kah-BOOOOM!!!") new_events = [helpers.new_sub_orchestration_failed_event(1, ex)] @@ -589,7 +774,9 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'TaskFailedError' # TODO: Should this be the specific error? + assert ( + complete_action.failureDetails.errorType == "TaskFailedError" + ) # TODO: Should this be the specific error? 
assert str(ex) in complete_action.failureDetails.errorMessage # Make sure the line of code where the exception was raised is included in the stack trace @@ -599,6 +786,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_nondeterminism_expected_sub_orchestration_task_completion_no_task(): """Tests the non-determinism detection when a sub-orchestration action is encounteed when it shouldn't be""" + def orchestrator(ctx: task.OrchestrationContext, _): result = yield task.CompletableTask() # dummy task return result @@ -608,11 +796,15 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_sub_orchestration_created_event(1, "some_sub_orchestration", "sub-orch-123", encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + helpers.new_sub_orchestration_created_event( + 1, "some_sub_orchestration", "sub-orch-123", encoded_input=None + ), + ] - new_events = [ - helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] + new_events = [helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) @@ -620,17 +812,22 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID - assert "call_sub_orchestrator" in complete_action.failureDetails.errorMessage # expected method name + assert ( + "call_sub_orchestrator" in complete_action.failureDetails.errorMessage + ) # expected method name def test_nondeterminism_expected_sub_orchestration_task_completion_wrong_task_type(): """Tests the non-determinism detection when a sub-orchestration action is encounteed when it shouldn't be. This variation tests the case where the expected task type is wrong (e.g. 
the code schedules a timer task but the history contains a sub-orchestration completed task).""" + def orchestrator(ctx: task.OrchestrationContext, _): - result = yield ctx.create_timer(datetime.utcnow()) # created timer but history expects sub-orchestration + result = yield ctx.create_timer( + datetime.now(timezone.utc) + ) # created timer but history expects sub-orchestration return result registry = worker._Registry() @@ -638,11 +835,15 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), - helpers.new_sub_orchestration_created_event(1, "some_sub_orchestration", "sub-orch-123", encoded_input=None)] + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), + helpers.new_sub_orchestration_created_event( + 1, "some_sub_orchestration", "sub-orch-123", encoded_input=None + ), + ] - new_events = [ - helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] + new_events = [helpers.new_sub_orchestration_completed_event(1, encoded_output="42")] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) @@ -650,13 +851,16 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'NonDeterminismError' + assert complete_action.failureDetails.errorType == "NonDeterminismError" assert "1" in complete_action.failureDetails.errorMessage # task ID - assert "call_sub_orchestrator" in complete_action.failureDetails.errorMessage # expected method name + assert ( + "call_sub_orchestrator" in complete_action.failureDetails.errorMessage + ) # expected method name def test_raise_event(): """Tests that an orchestration can wait for and process an external event sent by a client""" + def orchestrator(ctx: task.OrchestrationContext, _): result = yield ctx.wait_for_external_event("my_event") return result @@ -667,7 +871,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [] new_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID)] + helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID), + ] # Execute the orchestration until it is waiting for an external event. The result # should be an empty list of actions because the orchestration didn't schedule any work. @@ -690,6 +895,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_raise_event_buffered(): """Tests that an orchestration can receive an event that arrives earlier than expected""" + def orchestrator(ctx: task.OrchestrationContext, _): yield ctx.create_timer(ctx.current_utc_datetime + timedelta(days=1)) result = yield ctx.wait_for_external_event("my_event") @@ -702,7 +908,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): new_events = [ helpers.new_orchestrator_started_event(), helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID), - helpers.new_event_raised_event("my_event", encoded_input="42")] + helpers.new_event_raised_event("my_event", encoded_input="42"), + ] # Execute the orchestration. 
It should be in a running state waiting for the timer to fire executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -713,7 +920,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Complete the timer task. The orchestration should move to the wait_for_external_event step, which # should then complete immediately because the event was buffered in the old event history. - timer_due_time = datetime.utcnow() + timedelta(days=1) + timer_due_time = datetime.now(timezone.utc) + timedelta(days=1) old_events = new_events + [helpers.new_timer_created_event(1, timer_due_time)] new_events = [helpers.new_timer_fired_event(1, timer_due_time)] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -736,10 +943,12 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID)] + helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID), + ] new_events = [ helpers.new_suspend_event(), - helpers.new_event_raised_event("my_event", encoded_input="42")] + helpers.new_event_raised_event("my_event", encoded_input="42"), + ] # Execute the orchestration. It should remain in a running state because it was suspended prior # to processing the event raised event. @@ -771,10 +980,12 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID)] + helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID), + ] new_events = [ helpers.new_terminated_event(encoded_output=json.dumps("terminated!")), - helpers.new_event_raised_event("my_event", encoded_input="42")] + helpers.new_event_raised_event("my_event", encoded_input="42"), + ] # Execute the orchestration. It should be in a running state waiting for an external event executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) @@ -788,6 +999,7 @@ def orchestrator(ctx: task.OrchestrationContext, _): @pytest.mark.parametrize("save_events", [True, False]) def test_continue_as_new(save_events: bool): """Tests the behavior of the continue-as-new API""" + def orchestrator(ctx: task.OrchestrationContext, input: int): yield ctx.create_timer(ctx.current_utc_datetime + timedelta(days=1)) ctx.continue_as_new(input + 1, save_events=save_events) @@ -801,9 +1013,9 @@ def orchestrator(ctx: task.OrchestrationContext, input: int): helpers.new_event_raised_event("my_event", encoded_input="42"), helpers.new_event_raised_event("my_event", encoded_input="43"), helpers.new_event_raised_event("my_event", encoded_input="44"), - helpers.new_timer_created_event(1, datetime.utcnow() + timedelta(days=1))] - new_events = [ - helpers.new_timer_fired_event(1, datetime.utcnow() + timedelta(days=1))] + helpers.new_timer_created_event(1, datetime.now(timezone.utc) + timedelta(days=1)), + ] + new_events = [helpers.new_timer_fired_event(1, datetime.now(timezone.utc) + timedelta(days=1))] # Execute the orchestration. 
It should be in a running state waiting for the timer to fire
    executor = worker._OrchestrationExecutor(registry, TEST_LOGGER)
@@ -817,12 +1029,15 @@ def orchestrator(ctx: task.OrchestrationContext, input: int):
        event = complete_action.carryoverEvents[i]
        assert type(event) is pb.HistoryEvent
        assert event.HasField("eventRaised")
-        assert event.eventRaised.name.casefold() == "my_event".casefold()  # event names are case-insensitive
+        assert (
+            event.eventRaised.name.casefold() == "my_event".casefold()
+        )  # event names are case-insensitive
        assert event.eventRaised.input.value == json.dumps(42 + i)


def test_fan_out():
    """Tests that a fan-out pattern correctly schedules N tasks"""
+
    def hello(_, name: str):
        return f"Hello {name}!"

@@ -840,7 +1055,10 @@ def orchestrator(ctx: task.OrchestrationContext, count: int):
    old_events = []
    new_events = [
        helpers.new_orchestrator_started_event(),
-        helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input="10")]
+        helpers.new_execution_started_event(
+            orchestrator_name, TEST_INSTANCE_ID, encoded_input="10"
+        ),
+    ]

    executor = worker._OrchestrationExecutor(registry, TEST_LOGGER)
    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
@@ -856,6 +1074,7 @@ def orchestrator(ctx: task.OrchestrationContext, count: int):

def test_fan_in():
    """Tests that a fan-in pattern works correctly"""
+
    def print_int(_, val: int):
        return str(val)

@@ -872,15 +1091,20 @@ def orchestrator(ctx: task.OrchestrationContext, _):

    old_events = [
        helpers.new_orchestrator_started_event(),
-        helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None)]
+        helpers.new_execution_started_event(
+            orchestrator_name, TEST_INSTANCE_ID, encoded_input=None
+        ),
+    ]
    for i in range(10):
-        old_events.append(helpers.new_task_scheduled_event(
-            i + 1, activity_name, encoded_input=str(i)))
+        old_events.append(
+            helpers.new_task_scheduled_event(i + 1, activity_name, encoded_input=str(i))
+        )

    new_events = []
    for i in range(10):
-        new_events.append(helpers.new_task_completed_event(
-            i + 1, encoded_output=print_int(None, i)))
+        new_events.append(
+            helpers.new_task_completed_event(i + 1, encoded_output=print_int(None, i))
+        )

    # First, test with only the first 5 events. We expect the orchestration to be running
    # but return zero actions since it's still waiting for the other 5 tasks to complete.
@@ -901,6 +1125,7 @@ def orchestrator(ctx: task.OrchestrationContext, _):

def test_fan_in_with_single_failure():
    """Tests that a fan-in pattern works correctly when one of the tasks fails"""
+
    def print_int(_, val: int):
        return str(val)

@@ -917,17 +1142,22 @@ def orchestrator(ctx: task.OrchestrationContext, _):

    old_events = [
        helpers.new_orchestrator_started_event(),
-        helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None)]
+        helpers.new_execution_started_event(
+            orchestrator_name, TEST_INSTANCE_ID, encoded_input=None
+        ),
+    ]
    for i in range(10):
-        old_events.append(helpers.new_task_scheduled_event(
-            i + 1, activity_name, encoded_input=str(i)))
+        old_events.append(
+            helpers.new_task_scheduled_event(i + 1, activity_name, encoded_input=str(i))
+        )

    # 5 of the tasks complete successfully, 1 fails, and 4 are still running.
    # The expectation is that the orchestration will fail immediately.
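+    # In other words, when_all is expected to fail fast: a single failed task
+    # should complete the aggregate task with that failure instead of waiting
+    # for the four still-running tasks.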
new_events = [] for i in range(5): - new_events.append(helpers.new_task_completed_event( - i + 1, encoded_output=print_int(None, i))) + new_events.append( + helpers.new_task_completed_event(i + 1, encoded_output=print_int(None, i)) + ) ex = Exception("Kah-BOOOOM!!!") new_events.append(helpers.new_task_failed_event(6, ex)) @@ -938,12 +1168,15 @@ def orchestrator(ctx: task.OrchestrationContext, _): complete_action = get_and_validate_single_complete_orchestration_action(actions) assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED - assert complete_action.failureDetails.errorType == 'TaskFailedError' # TODO: Is this the right error type? + assert ( + complete_action.failureDetails.errorType == "TaskFailedError" + ) # TODO: Is this the right error type? assert str(ex) in complete_action.failureDetails.errorMessage def test_when_any(): """Tests that a when_any pattern works correctly""" + def hello(_, name: str): return f"Hello {name}!" @@ -963,20 +1196,25 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Test 1: Start the orchestration and let it yield on the when_any. We expect the orchestration # to return two actions: one to schedule the "Tokyo" task and one to schedule the "Seattle" task. old_events = [] - new_events = [helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None)] + new_events = [ + helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None) + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions assert len(actions) == 2 - assert actions[0].HasField('scheduleTask') - assert actions[1].HasField('scheduleTask') + assert actions[0].HasField("scheduleTask") + assert actions[1].HasField("scheduleTask") # The next tests assume that the orchestration has already awaited at the task.when_any() old_events = [ helpers.new_orchestrator_started_event(), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), helpers.new_task_scheduled_event(1, activity_name, encoded_input=json.dumps("Tokyo")), - helpers.new_task_scheduled_event(2, activity_name, encoded_input=json.dumps("Seattle"))] + helpers.new_task_scheduled_event(2, activity_name, encoded_input=json.dumps("Seattle")), + ] # Test 2: Complete the "Tokyo" task. We expect the orchestration to complete with output "Hello, Tokyo!" encoded_output = json.dumps(hello(None, "Tokyo")) @@ -1001,20 +1239,24 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_when_any_with_retry(): """Tests that a when_any pattern works correctly with retries""" + def dummy_activity(_, inp: str): if inp == "Tokyo": raise ValueError("Kah-BOOOOM!!!") return f"Hello {inp}!" 
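+    # The retry cadence exercised below assumes delay = first_retry_interval
+    # * backoff_coefficient ** (retry count), capped at max_retry_interval:
+    # the assertions expect timers at +1s and then +2s after each failure.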
def orchestrator(ctx: task.OrchestrationContext, _): - t1 = ctx.call_activity(dummy_activity, - retry_policy=task.RetryPolicy( - first_retry_interval=timedelta(seconds=1), - max_number_of_attempts=6, - backoff_coefficient=2, - max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=50)), - input="Tokyo") + t1 = ctx.call_activity( + dummy_activity, + retry_policy=task.RetryPolicy( + first_retry_interval=timedelta(seconds=1), + max_number_of_attempts=6, + backoff_coefficient=2, + max_retry_interval=timedelta(seconds=10), + retry_timeout=timedelta(seconds=50), + ), + input="Tokyo", + ) t2 = ctx.call_activity(dummy_activity, input="Seattle") winner = yield task.when_any([t1, t2]) if winner == t1: @@ -1030,14 +1272,18 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Simulate the task failing for the first time and confirm that a timer is scheduled for 1 second in the future old_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), - helpers.new_task_scheduled_event(2, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(2, task.get_name(dummy_activity)), + ] expected_fire_at = current_timestamp + timedelta(seconds=1) new_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1051,7 +1297,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(3, current_timestamp)] + helpers.new_timer_fired_event(3, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1064,7 +1311,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): expected_fire_at = current_timestamp + timedelta(seconds=2) new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1086,20 +1334,24 @@ def orchestrator(ctx: task.OrchestrationContext, _): def test_when_all_with_retry(): """Tests that a when_all pattern works correctly with retries""" + def dummy_activity(ctx, inp: str): if inp == "Tokyo": raise ValueError("Kah-BOOOOM!!!") return f"Hello {inp}!" 
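+    # With first_retry_interval=2s and backoff_coefficient=4, the second
+    # retry delay would be 8s but should be capped at max_retry_interval=5s;
+    # the timer assertions below check exactly the +2s and +5s fire times.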
def orchestrator(ctx: task.OrchestrationContext, _): - t1 = ctx.call_activity(dummy_activity, - retry_policy=task.RetryPolicy( - first_retry_interval=timedelta(seconds=2), - max_number_of_attempts=3, - backoff_coefficient=4, - max_retry_interval=timedelta(seconds=5), - retry_timeout=timedelta(seconds=50)), - input="Tokyo") + t1 = ctx.call_activity( + dummy_activity, + retry_policy=task.RetryPolicy( + first_retry_interval=timedelta(seconds=2), + max_number_of_attempts=3, + backoff_coefficient=4, + max_retry_interval=timedelta(seconds=5), + retry_timeout=timedelta(seconds=50), + ), + input="Tokyo", + ) t2 = ctx.call_activity(dummy_activity, input="Seattle") results = yield task.when_all([t1, t2]) return results @@ -1112,14 +1364,18 @@ def orchestrator(ctx: task.OrchestrationContext, _): # Simulate the task failing for the first time and confirm that a timer is scheduled for 2 seconds in the future old_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_execution_started_event(orchestrator_name, TEST_INSTANCE_ID, encoded_input=None), + helpers.new_execution_started_event( + orchestrator_name, TEST_INSTANCE_ID, encoded_input=None + ), helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)), - helpers.new_task_scheduled_event(2, task.get_name(dummy_activity))] + helpers.new_task_scheduled_event(2, task.get_name(dummy_activity)), + ] expected_fire_at = current_timestamp + timedelta(seconds=2) new_events = [ helpers.new_orchestrator_started_event(timestamp=current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1133,7 +1389,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): old_events = old_events + new_events new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_timer_fired_event(3, current_timestamp)] + helpers.new_timer_fired_event(3, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1146,7 +1403,8 @@ def orchestrator(ctx: task.OrchestrationContext, _): expected_fire_at = current_timestamp + timedelta(seconds=5) new_events = [ helpers.new_orchestrator_started_event(current_timestamp), - helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))] + helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1159,8 +1417,10 @@ def orchestrator(ctx: task.OrchestrationContext, _): # And, Simulate the timer firing at the expected time and confirm that another activity task is scheduled encoded_output = json.dumps(dummy_activity(None, "Seattle")) old_events = old_events + new_events - new_events = [helpers.new_task_completed_event(2, encoded_output), - helpers.new_timer_fired_event(4, current_timestamp)] + new_events = [ + helpers.new_task_completed_event(2, encoded_output), + helpers.new_timer_fired_event(4, current_timestamp), + ] executor = worker._OrchestrationExecutor(registry, TEST_LOGGER) result = executor.execute(TEST_INSTANCE_ID, old_events, new_events) actions = result.actions @@ -1174,17 +1434,277 @@ def orchestrator(ctx: 
task.OrchestrationContext, _):
    old_events = old_events + new_events
    new_events = [
        helpers.new_orchestrator_started_event(current_timestamp),
-        helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!"))]
+        helpers.new_task_failed_event(1, ValueError("Kah-BOOOOM!!!")),
+    ]
    executor = worker._OrchestrationExecutor(registry, TEST_LOGGER)
    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
    actions = result.actions
    complete_action = get_and_validate_single_complete_orchestration_action(actions)
    assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED
-    assert complete_action.failureDetails.errorType == 'TaskFailedError'  # TODO: Should this be the specific error?
+    assert (
+        complete_action.failureDetails.errorType == "TaskFailedError"
+    )  # TODO: Should this be the specific error?
    assert str(ex) in complete_action.failureDetails.errorMessage


-def get_and_validate_single_complete_orchestration_action(actions: List[pb.OrchestratorAction]) -> pb.CompleteOrchestrationAction:
+def test_activity_non_retryable_default_exception():
+    """If an activity fails with NonRetryableError, it should not be retried and the orchestration should fail immediately."""
+
+    def dummy_activity(ctx, _):
+        raise task.NonRetryableError("boom")
+
+    def orchestrator(ctx: task.OrchestrationContext, _):
+        yield ctx.call_activity(
+            dummy_activity,
+            retry_policy=task.RetryPolicy(
+                first_retry_interval=timedelta(seconds=1),
+                max_number_of_attempts=3,
+                backoff_coefficient=1,
+            ),
+        )
+
+    registry = worker._Registry()
+    name = registry.add_orchestrator(orchestrator)
+
+    current_timestamp = datetime.now(timezone.utc)
+    old_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None),
+        helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)),
+    ]
+    new_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_task_failed_event(1, task.NonRetryableError("boom")),
+    ]
+
+    executor = worker._OrchestrationExecutor(registry, TEST_LOGGER)
+    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
+    actions = result.actions
+    complete_action = get_and_validate_single_complete_orchestration_action(actions)
+    assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED
+    assert "Activity task #1 failed: boom" in complete_action.failureDetails.errorMessage
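+
+
+# Only exceptions that are (or derive from) task.NonRetryableError bypass the
+# retry policy by default; other exception types can be opted out explicitly
+# via RetryPolicy(non_retryable_error_types=...), by name or by class, as the
+# tests below exercise.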
+
+
+def test_activity_non_retryable_policy_name():
+    """If the policy marks ValueError as non-retryable (by name), fail immediately without retry."""
+
+    def dummy_activity(ctx, _):
+        raise ValueError("boom")
+
+    def orchestrator(ctx: task.OrchestrationContext, _):
+        yield ctx.call_activity(
+            dummy_activity,
+            retry_policy=task.RetryPolicy(
+                first_retry_interval=timedelta(seconds=1),
+                max_number_of_attempts=5,
+                non_retryable_error_types=["ValueError"],
+            ),
+        )
+
+    registry = worker._Registry()
+    name = registry.add_orchestrator(orchestrator)
+
+    current_timestamp = datetime.now(timezone.utc)
+    old_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None),
+        helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)),
+    ]
+    new_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_task_failed_event(1, ValueError("boom")),
+    ]
+
+    executor = worker._OrchestrationExecutor(registry, TEST_LOGGER)
+    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
+    actions = result.actions
+    complete_action = get_and_validate_single_complete_orchestration_action(actions)
+    assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED
+    assert "Activity task #1 failed: boom" in complete_action.failureDetails.errorMessage
+
+
+def test_activity_generic_exception_is_retryable():
+    """Verify that a generic Exception is retryable by default (not treated as non-retryable)."""
+
+    def dummy_activity(ctx, _):
+        raise Exception("generic error")
+
+    def orchestrator(ctx: task.OrchestrationContext, _):
+        yield ctx.call_activity(
+            dummy_activity,
+            retry_policy=task.RetryPolicy(
+                first_retry_interval=timedelta(seconds=1),
+                max_number_of_attempts=3,
+                backoff_coefficient=1,
+            ),
+        )
+
+    registry = worker._Registry()
+    name = registry.add_orchestrator(orchestrator)
+
+    current_timestamp = datetime.now(timezone.utc)
+    # First attempt fails
+    old_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_execution_started_event(name, TEST_INSTANCE_ID, encoded_input=None),
+        helpers.new_task_scheduled_event(1, task.get_name(dummy_activity)),
+    ]
+    new_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_task_failed_event(1, Exception("generic error")),
+    ]
+
+    executor = worker._OrchestrationExecutor(registry, TEST_LOGGER)
+    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
+    actions = result.actions
+    # Should schedule a retry timer, not fail immediately
+    assert len(actions) == 1
+    assert actions[0].HasField("createTimer")
+    assert actions[0].id == 2
+
+    # Simulate the timer firing and the activity being rescheduled
+    expected_fire_at = current_timestamp + timedelta(seconds=1)
+    old_events = old_events + new_events
+    current_timestamp = expected_fire_at
+    new_events = [
+        helpers.new_orchestrator_started_event(current_timestamp),
+        helpers.new_timer_fired_event(2, current_timestamp),
+    ]
+    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
+    actions = result.actions
+    assert len(actions) == 2  # timer + rescheduled task
+    assert actions[1].HasField("scheduleTask")
+    assert actions[1].id == 1
+
+    # Second attempt also fails
+    old_events = old_events + new_events
+    new_events = [
+        helpers.new_orchestrator_started_event(current_timestamp),
+        helpers.new_task_failed_event(1, Exception("generic error")),
+    ]
+
+    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
+    actions = result.actions
+    # Should schedule another retry timer
+    assert len(actions) == 3
+    assert actions[2].HasField("createTimer")
+    assert actions[2].id == 3
+
+    # Simulate the timer firing and the activity being rescheduled
+    expected_fire_at = current_timestamp + timedelta(seconds=1)
+    old_events = old_events + new_events
+    current_timestamp = expected_fire_at
+    new_events = [
+        helpers.new_orchestrator_started_event(current_timestamp),
+        helpers.new_timer_fired_event(3, current_timestamp),
+    ]
+    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
+    actions = result.actions
+    assert len(actions) == 3  # timer + rescheduled task
+    assert actions[1].HasField("scheduleTask")
+    assert actions[1].id == 1
+
+    # Third attempt fails - should exhaust retries
+    old_events = old_events + new_events
+    new_events = [
+        helpers.new_orchestrator_started_event(current_timestamp),
+        helpers.new_task_failed_event(1, Exception("generic error")),
+    ]
+
+    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
+    actions = result.actions
+    # Now should fail - no more retries
+    complete_action = get_and_validate_single_complete_orchestration_action(actions)
+    assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED
+    assert "Activity task #1 failed: generic error" in complete_action.failureDetails.errorMessage
+
+
+def test_sub_orchestration_non_retryable_default_exception():
+    """If a sub-orchestration fails with NonRetryableError, do not retry and fail immediately."""
+
+    def child(ctx: task.OrchestrationContext, _):
+        pass
+
+    def parent(ctx: task.OrchestrationContext, _):
+        yield ctx.call_sub_orchestrator(
+            child,
+            retry_policy=task.RetryPolicy(
+                first_retry_interval=timedelta(seconds=1),
+                max_number_of_attempts=3,
+            ),
+        )
+
+    registry = worker._Registry()
+    child_name = registry.add_orchestrator(child)
+    parent_name = registry.add_orchestrator(parent)
+
+    current_timestamp = datetime.now(timezone.utc)
+    old_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_execution_started_event(parent_name, TEST_INSTANCE_ID, encoded_input=None),
+        helpers.new_sub_orchestration_created_event(1, child_name, "sub-1", encoded_input=None),
+    ]
+    new_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_sub_orchestration_failed_event(1, task.NonRetryableError("boom")),
+    ]
+
+    executor = worker._OrchestrationExecutor(registry, TEST_LOGGER)
+    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
+    actions = result.actions
+    complete_action = get_and_validate_single_complete_orchestration_action(actions)
+    assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED
+    assert "Sub-orchestration task #1 failed: boom" in complete_action.failureDetails.errorMessage
+
+
+def test_sub_orchestration_non_retryable_policy_type():
+    """If the policy marks ValueError as non-retryable (by class), fail immediately without retry."""
+
+    def child(ctx: task.OrchestrationContext, _):
+        pass
+
+    def parent(ctx: task.OrchestrationContext, _):
+        yield ctx.call_sub_orchestrator(
+            child,
+            retry_policy=task.RetryPolicy(
+                first_retry_interval=timedelta(seconds=1),
+                max_number_of_attempts=5,
+                non_retryable_error_types=[ValueError],
+            ),
+        )
+
+    registry = worker._Registry()
+    child_name = registry.add_orchestrator(child)
+    parent_name = registry.add_orchestrator(parent)
+
+    current_timestamp = datetime.now(timezone.utc)
+    old_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_execution_started_event(parent_name, TEST_INSTANCE_ID, encoded_input=None),
+        helpers.new_sub_orchestration_created_event(1, child_name, "sub-1", encoded_input=None),
+    ]
+    new_events = [
+        helpers.new_orchestrator_started_event(timestamp=current_timestamp),
+        helpers.new_sub_orchestration_failed_event(1, ValueError("boom")),
+    ]
+
+    executor = worker._OrchestrationExecutor(registry, TEST_LOGGER)
+    result = executor.execute(TEST_INSTANCE_ID, old_events, new_events)
+    actions = result.actions
+    complete_action = get_and_validate_single_complete_orchestration_action(actions)
+    assert complete_action.orchestrationStatus == pb.ORCHESTRATION_STATUS_FAILED
+    assert "Sub-orchestration task #1 failed: boom" in complete_action.failureDetails.errorMessage
+
+
+def get_and_validate_single_complete_orchestration_action(
+    actions: list[pb.OrchestratorAction],
+) -> pb.CompleteOrchestrationAction:
    assert len(actions) == 1
    assert type(actions[0]) is pb.OrchestratorAction
    assert actions[0].HasField("completeOrchestration")
diff --git a/tests/durabletask/test_orchestration_wait.py b/tests/durabletask/test_orchestration_wait.py
new file mode 100644
index 00000000..49eab0e2
--- /dev/null
+++ b/tests/durabletask/test_orchestration_wait.py
@@ -0,0 +1,69 @@
+from unittest.mock import Mock
+
+import pytest
+
+from durabletask.client import TaskHubGrpcClient
+
+
+@pytest.mark.parametrize("timeout", [None, 0, 5])
+def test_wait_for_orchestration_start_timeout(timeout):
+    instance_id = "test-instance"
+
+    from durabletask.internal.orchestrator_service_pb2 import (
+        ORCHESTRATION_STATUS_RUNNING,
+        GetInstanceResponse,
+        OrchestrationState,
+    )
+
+    response = GetInstanceResponse()
+    state = OrchestrationState()
+    state.instanceId = instance_id
+    state.orchestrationStatus = ORCHESTRATION_STATUS_RUNNING
+    response.orchestrationState.CopyFrom(state)
+
+    c = TaskHubGrpcClient()
+    c._stub = Mock()
+    c._stub.WaitForInstanceStart.return_value = response
+
+    c.wait_for_orchestration_start(instance_id, timeout=timeout)
+
+    # Verify WaitForInstanceStart was called; a None or 0 timeout should map to timeout=None
+    c._stub.WaitForInstanceStart.assert_called_once()
+    _, kwargs = c._stub.WaitForInstanceStart.call_args
+    if timeout is None or timeout == 0:
+        assert kwargs.get("timeout") is None
+    else:
+        assert kwargs.get("timeout") == timeout
+
+
+@pytest.mark.parametrize("timeout", [None, 0, 5])
+def test_wait_for_orchestration_completion_timeout(timeout):
+    instance_id = "test-instance"
+
+    from durabletask.internal.orchestrator_service_pb2 import (
+        ORCHESTRATION_STATUS_COMPLETED,
+        GetInstanceResponse,
+        OrchestrationState,
+    )
+
+    response = GetInstanceResponse()
+    state = OrchestrationState()
+    state.instanceId = instance_id
+    state.orchestrationStatus = ORCHESTRATION_STATUS_COMPLETED
+    response.orchestrationState.CopyFrom(state)
+
+    c = TaskHubGrpcClient()
+    c._stub = Mock()
+    c._stub.WaitForInstanceCompletion.return_value = response
+
+    c.wait_for_orchestration_completion(instance_id, timeout=timeout)
+
+    # Verify WaitForInstanceCompletion was called; a None or 0 timeout should map to timeout=None
+    c._stub.WaitForInstanceCompletion.assert_called_once()
+    _, kwargs = c._stub.WaitForInstanceCompletion.call_args
+    if timeout is None or timeout == 0:
+        assert kwargs.get("timeout") is None
+    else:
+        assert kwargs.get("timeout") == timeout
diff --git a/tests/durabletask/test_registry.py b/tests/durabletask/test_registry.py
new file mode 100644
index 00000000..b5fcfa9f
--- /dev/null
+++ b/tests/durabletask/test_registry.py
@@ -0,0 +1,205 @@
+"""
+Copyright 2025 The Dapr Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+""" + +"""Unit tests for the _Registry class validation logic.""" + +import pytest + +from durabletask import worker + + +def test_registry_add_orchestrator_none(): + """Test that adding a None orchestrator raises ValueError.""" + registry = worker._Registry() + + with pytest.raises(ValueError, match="An orchestrator function argument is required"): + registry.add_orchestrator(None) + + +def test_registry_add_named_orchestrator_empty_name(): + """Test that adding an orchestrator with empty name raises ValueError.""" + registry = worker._Registry() + + def dummy_orchestrator(ctx, input): + return "done" + + with pytest.raises(ValueError, match="A non-empty orchestrator name is required"): + registry.add_named_orchestrator("", dummy_orchestrator) + + +def test_registry_add_orchestrator_duplicate(): + """Test that adding a duplicate orchestrator raises ValueError.""" + registry = worker._Registry() + + def dummy_orchestrator(ctx, input): + return "done" + + name = "test_orchestrator" + registry.add_named_orchestrator(name, dummy_orchestrator) + + with pytest.raises(ValueError, match=f"A '{name}' orchestrator already exists"): + registry.add_named_orchestrator(name, dummy_orchestrator) + + +def test_registry_add_activity_none(): + """Test that adding a None activity raises ValueError.""" + registry = worker._Registry() + + with pytest.raises(ValueError, match="An activity function argument is required"): + registry.add_activity(None) + + +def test_registry_add_named_activity_empty_name(): + """Test that adding an activity with empty name raises ValueError.""" + registry = worker._Registry() + + def dummy_activity(ctx, input): + return "done" + + with pytest.raises(ValueError, match="A non-empty activity name is required"): + registry.add_named_activity("", dummy_activity) + + +def test_registry_add_activity_duplicate(): + """Test that adding a duplicate activity raises ValueError.""" + registry = worker._Registry() + + def dummy_activity(ctx, input): + return "done" + + name = "test_activity" + registry.add_named_activity(name, dummy_activity) + + with pytest.raises(ValueError, match=f"A '{name}' activity already exists"): + registry.add_named_activity(name, dummy_activity) + + +def test_registry_get_orchestrator_exists(): + """Test retrieving an existing orchestrator.""" + registry = worker._Registry() + + def dummy_orchestrator(ctx, input): + return "done" + + name = registry.add_orchestrator(dummy_orchestrator) + retrieved, _ = registry.get_orchestrator(name) + + assert retrieved is dummy_orchestrator + + +def test_registry_get_orchestrator_not_exists(): + """Test retrieving a non-existent orchestrator returns None.""" + registry = worker._Registry() + + retrieved, _ = registry.get_orchestrator("non_existent") + + assert retrieved is None + + +def test_registry_get_activity_exists(): + """Test retrieving an existing activity.""" + registry = worker._Registry() + + def dummy_activity(ctx, input): + return "done" + + name = registry.add_activity(dummy_activity) + retrieved = registry.get_activity(name) + + assert retrieved is dummy_activity + + +def test_registry_get_activity_not_exists(): + """Test retrieving a non-existent activity returns None.""" + registry = worker._Registry() + + retrieved = registry.get_activity("non_existent") + + assert retrieved is None + + +def test_registry_add_multiple_orchestrators(): + """Test adding multiple different orchestrators.""" + registry = worker._Registry() + + def orchestrator1(ctx, input): + return "one" + + def orchestrator2(ctx, input): + return 
"two" + + name1 = registry.add_orchestrator(orchestrator1) + name2 = registry.add_orchestrator(orchestrator2) + + assert name1 != name2 + orchestrator1, _ = registry.get_orchestrator(name1) + orchestrator2, _ = registry.get_orchestrator(name2) + assert orchestrator1 is not None + assert orchestrator2 is not None + + +def test_registry_add_multiple_activities(): + """Test adding multiple different activities.""" + registry = worker._Registry() + + def activity1(ctx, input): + return "one" + + def activity2(ctx, input): + return "two" + + name1 = registry.add_activity(activity1) + name2 = registry.add_activity(activity2) + + assert name1 != name2 + assert registry.get_activity(name1) is activity1 + assert registry.get_activity(name2) is activity2 + +def test_registry_add_named_versioned_orchestrators(): + """Test adding versioned orchestrators.""" + registry = worker._Registry() + + def orchestrator1(ctx, input): + return "one" + + def orchestrator2(ctx, input): + return "two" + + def orchestrator3(ctx, input): + return "two" + + registry.add_named_orchestrator(name="orchestrator", fn=orchestrator1, version_name="v1") + registry.add_named_orchestrator(name="orchestrator", fn=orchestrator2, version_name="v2", is_latest=True) + registry.add_named_orchestrator(name="orchestrator", fn=orchestrator3, version_name="v3") + + orquestrator, version = registry.get_orchestrator(name="orchestrator") + assert orquestrator is orchestrator2 + assert version == "v2" + + orquestrator, version = registry.get_orchestrator(name="orchestrator", version_name="v1") + assert orquestrator is orchestrator1 + assert version == "v1" + + orquestrator, version = registry.get_orchestrator(name="orchestrator", version_name="v2") + assert orquestrator is orchestrator2 + assert version == "v2" + + orquestrator, version = registry.get_orchestrator(name="orchestrator", version_name="v3") + assert orquestrator is orchestrator3 + assert version == "v3" + + with pytest.raises(worker.VersionNotRegisteredException): + registry.get_orchestrator(name="orchestrator", version_name="v4") + + orquestrator, _ = registry.get_orchestrator(name="non-existent") + assert orquestrator is None diff --git a/tests/durabletask/test_serialization.py b/tests/durabletask/test_serialization.py new file mode 100644 index 00000000..68f7f141 --- /dev/null +++ b/tests/durabletask/test_serialization.py @@ -0,0 +1,87 @@ +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +from collections import namedtuple +from dataclasses import dataclass +from types import SimpleNamespace + +from durabletask.internal.shared import AUTO_SERIALIZED, from_json, to_json + + +@dataclass +class SamplePayload: + count: int + name: str + + +def test_to_json_roundtrip_dataclass(): + payload = SamplePayload(count=5, name="widgets") + encoded = to_json(payload) + + assert AUTO_SERIALIZED in encoded + + decoded = from_json(encoded) + assert isinstance(decoded, SimpleNamespace) + assert decoded.count == 5 + assert decoded.name == "widgets" + + +def test_to_json_roundtrip_simplenamespace(): + payload = SimpleNamespace(foo="bar", baz=42) + encoded = to_json(payload) + + assert AUTO_SERIALIZED in encoded + + decoded = from_json(encoded) + assert isinstance(decoded, SimpleNamespace) + assert decoded.foo == "bar" + assert decoded.baz == 42 + + +def test_to_json_plain_dict_passthrough(): + payload = {"foo": "bar", "baz": 1} + encoded = to_json(payload) + + assert AUTO_SERIALIZED not in encoded + + decoded = from_json(encoded) + assert isinstance(decoded, dict) + assert decoded == {"foo": "bar", "baz": 1} + + +def test_to_json_namedtuple_roundtrip(): + Point = namedtuple("Point", ["x", "y"]) + payload = Point(3, 4) + encoded = to_json(payload) + + assert AUTO_SERIALIZED in encoded + + decoded = from_json(encoded) + assert isinstance(decoded, SimpleNamespace) + assert decoded.x == 3 + assert decoded.y == 4 + + +def test_to_json_nested_dataclass_collection(): + payload = [ + SamplePayload(count=1, name="first"), + SamplePayload(count=2, name="second"), + ] + encoded = to_json(payload) + + assert encoded.count(AUTO_SERIALIZED) >= 2 + + decoded = from_json(encoded) + assert isinstance(decoded, list) + assert [item.count for item in decoded] == [1, 2] + assert [item.name for item in decoded] == ["first", "second"] diff --git a/tests/durabletask/test_task.py b/tests/durabletask/test_task.py new file mode 100644 index 00000000..d8ec88ea --- /dev/null +++ b/tests/durabletask/test_task.py @@ -0,0 +1,115 @@ +# Copyright (c) The Dapr Authors. +# Licensed under the MIT License. 
+ +"""Unit tests for durabletask.task primitives.""" + +from durabletask import task + + +def test_when_all_empty_returns_successfully(): + """task.when_all([]) should complete immediately and return an empty list.""" + when_all_task = task.when_all([]) + + assert when_all_task.is_complete + assert when_all_task.get_result() == [] + + +def test_when_any_empty_returns_successfully(): + """task.when_any([]) should complete immediately and return an empty list.""" + when_any_task = task.when_any([]) + + assert when_any_task.is_complete + assert when_any_task.get_result() == [] + + +def test_when_all_happy_path_returns_ordered_results_and_completes_last(): + c1 = task.CompletableTask() + c2 = task.CompletableTask() + c3 = task.CompletableTask() + + all_task = task.when_all([c1, c2, c3]) + + assert not all_task.is_complete + + c2.complete("two") + + assert not all_task.is_complete + + c1.complete("one") + + assert not all_task.is_complete + + c3.complete("three") + + assert all_task.is_complete + + assert all_task.get_result() == ["one", "two", "three"] + + +def test_when_all_is_composable_with_when_any(): + c1 = task.CompletableTask() + c2 = task.CompletableTask() + + any_task = task.when_any([c1, c2]) + all_task = task.when_all([any_task]) + + assert not any_task.is_complete + assert not all_task.is_complete + + c2.complete("two") + + assert any_task.is_complete + assert all_task.is_complete + assert all_task.get_result() == [c2] + + +def test_when_any_is_composable_with_when_all(): + c1 = task.CompletableTask() + c2 = task.CompletableTask() + c3 = task.CompletableTask() + + all_task1 = task.when_all([c1, c2]) + all_task2 = task.when_all([c3]) + any_task = task.when_any([all_task1, all_task2]) + + assert not any_task.is_complete + assert not all_task1.is_complete + assert not all_task2.is_complete + + c1.complete("one") + + assert not any_task.is_complete + assert not all_task1.is_complete + assert not all_task2.is_complete + + c2.complete("two") + + assert any_task.is_complete + assert all_task1.is_complete + assert not all_task2.is_complete + + assert any_task.get_result() == all_task1 + + +def test_when_any_happy_path_returns_winner_task_and_completes_on_first(): + a = task.CompletableTask() + b = task.CompletableTask() + + any_task = task.when_any([a, b]) + + assert not any_task.is_complete + + b.complete("B") + + assert any_task.is_complete + + winner = any_task.get_result() + + assert winner is b + + assert winner.get_result() == "B" + + # Completing the other child should not change the winner + a.complete("A") + + assert any_task.get_result() is b diff --git a/tests/durabletask/test_worker_concurrency_loop.py b/tests/durabletask/test_worker_concurrency_loop.py new file mode 100644 index 00000000..53b6c9ae --- /dev/null +++ b/tests/durabletask/test_worker_concurrency_loop.py @@ -0,0 +1,149 @@ +import asyncio +import threading +import time + +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker + + +class DummyStub: + def __init__(self): + self.completed = [] + + def CompleteOrchestratorTask(self, res): + self.completed.append(("orchestrator", res)) + + def CompleteActivityTask(self, res): + self.completed.append(("activity", res)) + + +class DummyRequest: + def __init__(self, kind, instance_id): + self.kind = kind + self.instanceId = instance_id + self.orchestrationInstance = type("O", (), {"instanceId": instance_id}) + self.name = "dummy" + self.taskId = 1 + self.input = type("I", (), {"value": ""}) + self.pastEvents = [] + self.newEvents = [] + + def HasField(self, 
field): + return (field == "orchestratorRequest" and self.kind == "orchestrator") or ( + field == "activityRequest" and self.kind == "activity" + ) + + def WhichOneof(self, _): + return f"{self.kind}Request" + + +class DummyCompletionToken: + pass + + +def test_worker_concurrency_loop_sync(): + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=2, + maximum_concurrent_orchestration_work_items=1, + maximum_thread_pool_workers=2, + ) + worker = TaskHubGrpcWorker(concurrency_options=options) + stub = DummyStub() + + def dummy_orchestrator(req, stub, completionToken): + time.sleep(0.1) + stub.CompleteOrchestratorTask("ok") + + def dummy_activity(req, stub, completionToken): + time.sleep(0.1) + stub.CompleteActivityTask("ok") + + # Patch the worker's _execute_orchestrator and _execute_activity + worker._execute_orchestrator = dummy_orchestrator + worker._execute_activity = dummy_activity + + orchestrator_requests = [DummyRequest("orchestrator", f"orch{i}") for i in range(3)] + activity_requests = [DummyRequest("activity", f"act{i}") for i in range(4)] + + async def run_test(): + # Start the worker manager's run loop in the background + worker_task = asyncio.create_task(worker._async_worker_manager.run()) + for req in orchestrator_requests: + worker._async_worker_manager.submit_orchestration( + dummy_orchestrator, req, stub, DummyCompletionToken() + ) + for req in activity_requests: + worker._async_worker_manager.submit_activity( + dummy_activity, req, stub, DummyCompletionToken() + ) + await asyncio.sleep(1.0) + orchestrator_count = sum(1 for t, _ in stub.completed if t == "orchestrator") + activity_count = sum(1 for t, _ in stub.completed if t == "activity") + assert orchestrator_count == 3, ( + f"Expected 3 orchestrator completions, got {orchestrator_count}" + ) + assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" + worker._async_worker_manager._shutdown = True + await worker_task + + asyncio.run(run_test()) + + +# Dummy orchestrator and activity for sync context +def dummy_orchestrator(ctx, input): + # Simulate some work + time.sleep(0.1) + return "orchestrator-done" + + +def dummy_activity(ctx, input): + # Simulate some work + time.sleep(0.1) + return "activity-done" + + +def test_worker_concurrency_sync(): + # Use small concurrency to make test observable + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=2, + maximum_concurrent_orchestration_work_items=2, + maximum_thread_pool_workers=2, + ) + worker = TaskHubGrpcWorker(concurrency_options=options) + worker.add_orchestrator(dummy_orchestrator) + worker.add_activity(dummy_activity) + + # Simulate submitting work items to the queues directly (bypassing gRPC) + # We'll use the internal _async_worker_manager for this test + manager = worker._async_worker_manager + results = [] + lock = threading.Lock() + + def make_work(kind, idx): + def fn(*args, **kwargs): + time.sleep(0.1) + with lock: + results.append((kind, idx)) + return f"{kind}-{idx}-done" + + return fn + + # Submit more work than concurrency allows + for i in range(5): + manager.submit_orchestration(make_work("orch", i)) + manager.submit_activity(make_work("act", i)) + + # Run the manager loop in a thread (sync context) + def run_manager(): + asyncio.run(manager.run()) + + t = threading.Thread(target=run_manager) + t.start() + time.sleep(1.5) # Let work process + manager.shutdown() + # Unblock the consumers by putting dummy items in the queues + manager.activity_queue.put_nowait((lambda: None, (), {})) + 
manager.orchestration_queue.put_nowait((lambda: None, (), {})) + t.join(timeout=2) + + # Check that all work items completed + assert len(results) == 10 diff --git a/tests/durabletask/test_worker_concurrency_loop_async.py b/tests/durabletask/test_worker_concurrency_loop_async.py new file mode 100644 index 00000000..a88e3e31 --- /dev/null +++ b/tests/durabletask/test_worker_concurrency_loop_async.py @@ -0,0 +1,88 @@ +import asyncio + +from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker + + +class DummyStub: + def __init__(self): + self.completed = [] + + def CompleteOrchestratorTask(self, res): + self.completed.append(("orchestrator", res)) + + def CompleteActivityTask(self, res): + self.completed.append(("activity", res)) + + +class DummyRequest: + def __init__(self, kind, instance_id): + self.kind = kind + self.instanceId = instance_id + self.orchestrationInstance = type("O", (), {"instanceId": instance_id}) + self.name = "dummy" + self.taskId = 1 + self.input = type("I", (), {"value": ""}) + self.pastEvents = [] + self.newEvents = [] + + def HasField(self, field): + return (field == "orchestratorRequest" and self.kind == "orchestrator") or ( + field == "activityRequest" and self.kind == "activity" + ) + + def WhichOneof(self, _): + return f"{self.kind}Request" + + +class DummyCompletionToken: + pass + + +def test_worker_concurrency_loop_async(): + options = ConcurrencyOptions( + maximum_concurrent_activity_work_items=2, + maximum_concurrent_orchestration_work_items=1, + maximum_thread_pool_workers=2, + ) + grpc_worker = TaskHubGrpcWorker(concurrency_options=options) + stub = DummyStub() + + async def dummy_orchestrator(req, stub, completionToken): + await asyncio.sleep(0.1) + stub.CompleteOrchestratorTask("ok") + + async def dummy_activity(req, stub, completionToken): + await asyncio.sleep(0.1) + stub.CompleteActivityTask("ok") + + # Patch the worker's _execute_orchestrator and _execute_activity + grpc_worker._execute_orchestrator = dummy_orchestrator + grpc_worker._execute_activity = dummy_activity + + orchestrator_requests = [DummyRequest("orchestrator", f"orch{i}") for i in range(3)] + activity_requests = [DummyRequest("activity", f"act{i}") for i in range(4)] + + async def run_test(): + # Clear stub state before each run + stub.completed.clear() + worker_task = asyncio.create_task(grpc_worker._async_worker_manager.run()) + for req in orchestrator_requests: + grpc_worker._async_worker_manager.submit_orchestration( + dummy_orchestrator, req, stub, DummyCompletionToken() + ) + for req in activity_requests: + grpc_worker._async_worker_manager.submit_activity( + dummy_activity, req, stub, DummyCompletionToken() + ) + await asyncio.sleep(1.0) + orchestrator_count = sum(1 for t, _ in stub.completed if t == "orchestrator") + activity_count = sum(1 for t, _ in stub.completed if t == "activity") + assert orchestrator_count == 3, ( + f"Expected 3 orchestrator completions, got {orchestrator_count}" + ) + assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" + grpc_worker._async_worker_manager._shutdown = True + await worker_task + + asyncio.run(run_test()) + asyncio.run(run_test()) diff --git a/tests/test_client.py b/tests/test_client.py deleted file mode 100644 index b27f8e36..00000000 --- a/tests/test_client.py +++ /dev/null @@ -1,41 +0,0 @@ -from unittest.mock import patch - -from durabletask.internal.shared import (DefaultClientInterceptorImpl, - get_default_host_address, - get_grpc_channel) - -HOST_ADDRESS = 'localhost:50051' -METADATA = 
[('key1', 'value1'), ('key2', 'value2')] - - -def test_get_grpc_channel_insecure(): - with patch('grpc.insecure_channel') as mock_channel: - get_grpc_channel(HOST_ADDRESS, METADATA, False) - mock_channel.assert_called_once_with(HOST_ADDRESS) - - -def test_get_grpc_channel_secure(): - with patch('grpc.secure_channel') as mock_channel, patch( - 'grpc.ssl_channel_credentials') as mock_credentials: - get_grpc_channel(HOST_ADDRESS, METADATA, True) - mock_channel.assert_called_once_with(HOST_ADDRESS, mock_credentials.return_value) - - -def test_get_grpc_channel_default_host_address(): - with patch('grpc.insecure_channel') as mock_channel: - get_grpc_channel(None, METADATA, False) - mock_channel.assert_called_once_with(get_default_host_address()) - - -def test_get_grpc_channel_with_metadata(): - with patch('grpc.insecure_channel') as mock_channel, patch( - 'grpc.intercept_channel') as mock_intercept_channel: - get_grpc_channel(HOST_ADDRESS, METADATA, False) - mock_channel.assert_called_once_with(HOST_ADDRESS) - mock_intercept_channel.assert_called_once() - - # Capture and check the arguments passed to intercept_channel() - args, kwargs = mock_intercept_channel.call_args - assert args[0] == mock_channel.return_value - assert isinstance(args[1], DefaultClientInterceptorImpl) - assert args[1]._metadata == METADATA diff --git a/tests/test_orchestration_e2e.py b/tests/test_orchestration_e2e.py deleted file mode 100644 index 1cfc520b..00000000 --- a/tests/test_orchestration_e2e.py +++ /dev/null @@ -1,469 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import json -import threading -import time -from datetime import timedelta - -import pytest - -from durabletask import client, task, worker - -# NOTE: These tests assume a sidecar process is running. Example command: -# docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator -pytestmark = pytest.mark.e2e - - -def test_empty_orchestration(): - - invoked = False - - def empty_orchestrator(ctx: task.OrchestrationContext, _): - nonlocal invoked # don't do this in a real app! 
- invoked = True - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(empty_orchestrator) - w.start() - - c = client.TaskHubGrpcClient() - id = c.schedule_new_orchestration(empty_orchestrator) - state = c.wait_for_orchestration_completion(id, timeout=30) - - assert invoked - assert state is not None - assert state.name == task.get_name(empty_orchestrator) - assert state.instance_id == id - assert state.failure_details is None - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.serialized_input is None - assert state.serialized_output is None - assert state.serialized_custom_status is None - - -def test_activity_sequence(): - - def plus_one(_: task.ActivityContext, input: int) -> int: - return input + 1 - - def sequence(ctx: task.OrchestrationContext, start_val: int): - numbers = [start_val] - current = start_val - for _ in range(10): - current = yield ctx.call_activity(plus_one, input=current) - numbers.append(current) - return numbers - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(sequence) - w.add_activity(plus_one) - w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(sequence, input=1) - state = task_hub_client.wait_for_orchestration_completion( - id, timeout=30) - - assert state is not None - assert state.name == task.get_name(sequence) - assert state.instance_id == id - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.failure_details is None - assert state.serialized_input == json.dumps(1) - assert state.serialized_output == json.dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - assert state.serialized_custom_status is None - - -def test_activity_error_handling(): - - def throw(_: task.ActivityContext, input: int) -> int: - raise RuntimeError("Kah-BOOOOM!!!") - - compensation_counter = 0 - - def increment_counter(ctx, _): - nonlocal compensation_counter - compensation_counter += 1 - - def orchestrator(ctx: task.OrchestrationContext, input: int): - error_msg = "" - try: - yield ctx.call_activity(throw, input=input) - except task.TaskFailedError as e: - error_msg = e.details.message - - # compensating actions - yield ctx.call_activity(increment_counter) - yield ctx.call_activity(increment_counter) - - return error_msg - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(orchestrator) - w.add_activity(throw) - w.add_activity(increment_counter) - w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - - assert state is not None - assert state.name == task.get_name(orchestrator) - assert state.instance_id == id - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.serialized_output == json.dumps("Kah-BOOOOM!!!") - assert state.failure_details is None - assert state.serialized_custom_status is None - assert compensation_counter == 2 - - -def test_sub_orchestration_fan_out(): - threadLock = threading.Lock() - activity_counter = 0 - - def increment(ctx, _): - with threadLock: - nonlocal activity_counter - activity_counter += 1 - - def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int): - for _ in 
range(activity_count): - yield ctx.call_activity(increment) - - def parent_orchestrator(ctx: task.OrchestrationContext, count: int): - # Fan out to multiple sub-orchestrations - tasks = [] - for _ in range(count): - tasks.append(ctx.call_sub_orchestrator( - orchestrator_child, input=3)) - # Wait for all sub-orchestrations to complete - yield task.when_all(tasks) - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_activity(increment) - w.add_orchestrator(orchestrator_child) - w.add_orchestrator(parent_orchestrator) - w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.failure_details is None - assert activity_counter == 30 - - -def test_wait_for_multiple_external_events(): - def orchestrator(ctx: task.OrchestrationContext, _): - a = yield ctx.wait_for_external_event('A') - b = yield ctx.wait_for_external_event('B') - c = yield ctx.wait_for_external_event('C') - return [a, b, c] - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(orchestrator) - w.start() - - # Start the orchestration and immediately raise events to it. - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator) - task_hub_client.raise_orchestration_event(id, 'A', data='a') - task_hub_client.raise_orchestration_event(id, 'B', data='b') - task_hub_client.raise_orchestration_event(id, 'C', data='c') - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.serialized_output == json.dumps(['a', 'b', 'c']) - - -@pytest.mark.parametrize("raise_event", [True, False]) -def test_wait_for_external_event_timeout(raise_event: bool): - def orchestrator(ctx: task.OrchestrationContext, _): - approval: task.Task[bool] = ctx.wait_for_external_event('Approval') - timeout = ctx.create_timer(timedelta(seconds=3)) - winner = yield task.when_any([approval, timeout]) - if winner == approval: - return "approved" - else: - return "timed out" - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(orchestrator) - w.start() - - # Start the orchestration and immediately raise events to it. 
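- # (when raise_event is False, no event is raised and the 3-second timer should win the when_any race below)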
- task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator) - if raise_event: - task_hub_client.raise_orchestration_event(id, 'Approval') - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - if raise_event: - assert state.serialized_output == json.dumps("approved") - else: - assert state.serialized_output == json.dumps("timed out") - - -def test_suspend_and_resume(): - def orchestrator(ctx: task.OrchestrationContext, _): - result = yield ctx.wait_for_external_event("my_event") - return result - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(orchestrator) - w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator) - state = task_hub_client.wait_for_orchestration_start(id, timeout=30) - assert state is not None - - # Suspend the orchestration and wait for it to go into the SUSPENDED state - task_hub_client.suspend_orchestration(id) - while state.runtime_status == client.OrchestrationStatus.RUNNING: - time.sleep(0.1) - state = task_hub_client.get_orchestration_state(id) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.SUSPENDED - - # Raise an event to the orchestration and confirm that it does NOT complete - task_hub_client.raise_orchestration_event(id, "my_event", data=42) - try: - state = task_hub_client.wait_for_orchestration_completion(id, timeout=3) - assert False, "Orchestration should not have completed" - except TimeoutError: - pass - - # Resume the orchestration and wait for it to complete - task_hub_client.resume_orchestration(id) - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.serialized_output == json.dumps(42) - - -def test_terminate(): - def orchestrator(ctx: task.OrchestrationContext, _): - result = yield ctx.wait_for_external_event("my_event") - return result - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(orchestrator) - w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator) - state = task_hub_client.wait_for_orchestration_start(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.RUNNING - - task_hub_client.terminate_orchestration(id, output="some reason for termination") - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.TERMINATED - assert state.serialized_output == json.dumps("some reason for termination") - -def test_terminate_recursive(): - def root(ctx: task.OrchestrationContext, _): - result = yield ctx.call_sub_orchestrator(child) - return result - def child(ctx: task.OrchestrationContext, _): - result = yield ctx.wait_for_external_event("my_event") - return result - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(root) - w.add_orchestrator(child) - w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(root) - 
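# NOTE: the sub-orchestration's instance ID is auto-generated and not captured by this test, so the checks below can only observe the root instance -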
state = task_hub_client.wait_for_orchestration_start(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.RUNNING - - # Terminate the root orchestration (recursive is set to True by default) - task_hub_client.terminate_orchestration(id, output="some reason for termination") - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.TERMINATED - - # Re-fetch the state to verify that the terminated status is durable - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.TERMINATED - - task_hub_client.purge_orchestration(id) - state = task_hub_client.get_orchestration_state(id) - assert state is None - - -def test_continue_as_new(): - all_results = [] - - def orchestrator(ctx: task.OrchestrationContext, input: int): - result = yield ctx.wait_for_external_event("my_event") - if not ctx.is_replaying: - # NOTE: Real orchestrations should never interact with nonlocal variables like this. - nonlocal all_results - all_results.append(result) - - if len(all_results) <= 4: - ctx.continue_as_new(max(all_results), save_events=True) - else: - return all_results - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(orchestrator) - w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(orchestrator, input=0) - task_hub_client.raise_orchestration_event(id, "my_event", data=1) - task_hub_client.raise_orchestration_event(id, "my_event", data=2) - task_hub_client.raise_orchestration_event(id, "my_event", data=3) - task_hub_client.raise_orchestration_event(id, "my_event", data=4) - task_hub_client.raise_orchestration_event(id, "my_event", data=5) - - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.serialized_output == json.dumps(all_results) - assert state.serialized_input == json.dumps(4) - assert all_results == [1, 2, 3, 4, 5] - - -# NOTE: This test fails when running against durabletask-go with sqlite because the sqlite backend does not yet -# support orchestration ID reuse. This gap is being tracked here: -# https://github.com/microsoft/durabletask-go/issues/42 -def test_retry_policies(): - # This test verifies that the retry policies are working as expected. - # It does this by creating an orchestration that calls a sub-orchestrator, - # which in turn calls an activity that always fails. - # In this test, the retry policies are added, and the orchestration - # should still fail. But the number of times the sub-orchestrator and activity - # are called should increase as per the retry policies.
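- # With max_number_of_attempts=3 on both the sub-orchestration and the activity - # call, the expectation is 3 child invocations and 3 * 3 = 9 activity calls.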
- - child_orch_counter = 0 - throw_activity_counter = 0 - - # Retry policy applied to both the sub-orchestration and the activity call - retry_policy = task.RetryPolicy( - first_retry_interval=timedelta(seconds=1), - max_number_of_attempts=3, - backoff_coefficient=1, - max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=30)) - - def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _): - yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy) - - def child_orchestrator_with_retry(ctx: task.OrchestrationContext, _): - nonlocal child_orch_counter - if not ctx.is_replaying: - # NOTE: Real orchestrations should never interact with nonlocal variables like this. - # This is done only for testing purposes. - child_orch_counter += 1 - yield ctx.call_activity(throw_activity_with_retry, retry_policy=retry_policy) - - def throw_activity_with_retry(ctx: task.ActivityContext, _): - nonlocal throw_activity_counter - throw_activity_counter += 1 - raise RuntimeError("Kah-BOOOOM!!!") - - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(parent_orchestrator_with_retry) - w.add_orchestrator(child_orchestrator_with_retry) - w.add_activity(throw_activity_with_retry) - w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry) - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.FAILED - assert state.failure_details is not None - assert state.failure_details.error_type == "TaskFailedError" - assert state.failure_details.message.startswith("Sub-orchestration task #1 failed:") - assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") - assert state.failure_details.stack_trace is not None - assert throw_activity_counter == 9 - assert child_orch_counter == 3 - - -def test_retry_timeout(): - # This test verifies that the retry timeout is working as expected. - # Max number of attempts is 5 and the retry timeout is 14 seconds. - # With backoff_coefficient=2 the retry delays are 1, 2, 4 and 8 seconds, so the - # cumulative delay before a 5th attempt would be 1 + 2 + 4 + 8 = 15 seconds. - # Since that exceeds the retry timeout, the 5th attempt should not be made and - # the orchestration should fail after 4 attempts.
- throw_activity_counter = 0 - retry_policy = task.RetryPolicy( - first_retry_interval=timedelta(seconds=1), - max_number_of_attempts=5, - backoff_coefficient=2, - max_retry_interval=timedelta(seconds=10), - retry_timeout=timedelta(seconds=14)) - - def mock_orchestrator(ctx: task.OrchestrationContext, _): - yield ctx.call_activity(throw_activity, retry_policy=retry_policy) - - def throw_activity(ctx: task.ActivityContext, _): - nonlocal throw_activity_counter - throw_activity_counter += 1 - raise RuntimeError("Kah-BOOOOM!!!") - - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(mock_orchestrator) - w.add_activity(throw_activity) - w.start() - - task_hub_client = client.TaskHubGrpcClient() - id = task_hub_client.schedule_new_orchestration(mock_orchestrator) - state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) - assert state is not None - assert state.runtime_status == client.OrchestrationStatus.FAILED - assert state.failure_details is not None - assert state.failure_details.error_type == "TaskFailedError" - assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") - assert state.failure_details.stack_trace is not None - assert throw_activity_counter == 4 - - -def test_custom_status(): - - def empty_orchestrator(ctx: task.OrchestrationContext, _): - ctx.set_custom_status("foobaz") - - # Start a worker, which will connect to the sidecar in a background thread - with worker.TaskHubGrpcWorker() as w: - w.add_orchestrator(empty_orchestrator) - w.start() - - c = client.TaskHubGrpcClient() - id = c.schedule_new_orchestration(empty_orchestrator) - state = c.wait_for_orchestration_completion(id, timeout=30) - - assert state is not None - assert state.name == task.get_name(empty_orchestrator) - assert state.instance_id == id - assert state.failure_details is None - assert state.runtime_status == client.OrchestrationStatus.COMPLETED - assert state.serialized_input is None - assert state.serialized_output is None - assert state.serialized_custom_status == "\"foobaz\"" diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..b6bc7bab --- /dev/null +++ b/tox.ini @@ -0,0 +1,32 @@ +[tox] +skipsdist = True +minversion = 3.10.0 +envlist = + py{310,311,312,313,314} + ruff, + mypy, +# TODO: switch runner to uv (tox-uv plugin) +runner = virtualenv + +[testenv] +# you can run tox with the e2e pytest marker using tox factors: +# tox -e py310-e2e +# to use a custom gRPC endpoint: +# DAPR_GRPC_ENDPOINT=localhost:12345 tox -e py310-e2e +setenv = + PYTHONDONTWRITEBYTECODE=1 +deps = .[dev] +commands = + !e2e: pytest -m "not e2e" --verbose + e2e: pytest -m e2e --verbose +commands_pre = + pip3 install -e {toxinidir}/ +allowlist_externals = pip3 +pass_env = DAPR_GRPC_ENDPOINT,DAPR_HTTP_ENDPOINT,DAPR_RUNTIME_HOST,DAPR_GRPC_PORT,DAPR_HTTP_PORT + +[testenv:ruff] +basepython = python3 +usedevelop = False +commands = + ruff check --fix + ruff format
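+ +# Example local usage: +# tox # run every environment in envlist +# tox -e ruff # lint and format only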